author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 10:13:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 10:13:35 -0700
commit     4de13d7aa8f4d02f4dc99d4609575659f92b3c5a (patch)
tree       3bc9729eabe79c6164cd29a5d605000bc82bf837
parent     5af43c24ca59a448c9312dd4a4a51d27ec3b9a73 (diff)
parent     b8d4a5bf6a049303a29a3275f463f09a490b50ea (diff)
Merge branch 'for-3.10/core' of git://git.kernel.dk/linux-block
Pull block core updates from Jens Axboe:

 - The major piece is Kent's prep work for immutable bio vecs.

 - A stable-candidate fix for a scheduling-while-atomic bug in the
   queue bypass operation.

 - A fix for the hang when merging discard bios overflows the 32-bit
   unsigned rq->__data_len.

 - Tejun's changes converting the writeback thread pool to the generic
   workqueue mechanism.

 - The runtime PM framework; the SCSI patches in James' tree sit on top
   of these.

 - A few random fixes.

* 'for-3.10/core' of git://git.kernel.dk/linux-block: (40 commits)
  relay: move remove_buf_file inside relay_close_buf
  partitions/efi.c: replace useless kzalloc's by kmalloc's
  fs/block_dev.c: fix iov_shorten() criteria in blkdev_aio_read()
  block: fix max discard sectors limit
  blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
  Documentation: cfq-iosched: update documentation help for cfq tunables
  writeback: expose the bdi_wq workqueue
  writeback: replace custom worker pool implementation with unbound workqueue
  writeback: remove unused bdi_pending_list
  aoe: Fix unitialized var usage
  bio-integrity: Add explicit field for owner of bip_buf
  block: Add an explicit bio flag for bios that own their bvec
  block: Add bio_alloc_pages()
  block: Convert some code to bio_for_each_segment_all()
  block: Add bio_for_each_segment_all()
  bounce: Refactor __blk_queue_bounce to not use bi_io_vec
  raid1: use bio_copy_data()
  pktcdvd: Use bio_reset() in disabled code to kill bi_idx usage
  pktcdvd: use bio_copy_data()
  block: Add bio_copy_data()
  ...
-rw-r--r--  Documentation/block/cfq-iosched.txt       |  47
-rw-r--r--  block/blk-cgroup.c                        |   4
-rw-r--r--  block/blk-core.c                          | 265
-rw-r--r--  block/cfq-iosched.c                       |   7
-rw-r--r--  block/deadline-iosched.c                  |   2
-rw-r--r--  block/elevator.c                          |  26
-rw-r--r--  block/partitions/efi.c                    |   4
-rw-r--r--  drivers/block/aoe/aoecmd.c                |   2
-rw-r--r--  drivers/block/brd.c                       |   3
-rw-r--r--  drivers/block/floppy.c                    |   1
-rw-r--r--  drivers/block/pktcdvd.c                   | 102
-rw-r--r--  drivers/block/rbd.c                       |   2
-rw-r--r--  drivers/md/dm-crypt.c                     |   3
-rw-r--r--  drivers/md/dm-raid1.c                     |   2
-rw-r--r--  drivers/md/dm-stripe.c                    |   2
-rw-r--r--  drivers/md/dm-verity.c                    |   4
-rw-r--r--  drivers/md/faulty.c                       |   6
-rw-r--r--  drivers/md/linear.c                       |   3
-rw-r--r--  drivers/md/md.c                           |  17
-rw-r--r--  drivers/md/raid0.c                        |   9
-rw-r--r--  drivers/md/raid1.c                        | 133
-rw-r--r--  drivers/md/raid10.c                       |  78
-rw-r--r--  drivers/md/raid5.c                        |  49
-rw-r--r--  drivers/message/fusion/mptsas.c           |   6
-rw-r--r--  drivers/s390/block/dcssblk.c              |   3
-rw-r--r--  drivers/scsi/libsas/sas_expander.c        |   6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c  |  10
-rw-r--r--  fs/bio-integrity.c                        | 144
-rw-r--r--  fs/bio.c                                  | 366
-rw-r--r--  fs/block_dev.c                            |   2
-rw-r--r--  fs/btrfs/extent_io.c                      |   3
-rw-r--r--  fs/btrfs/volumes.c                        |   2
-rw-r--r--  fs/buffer.c                               |   1
-rw-r--r--  fs/direct-io.c                            |   8
-rw-r--r--  fs/exofs/ore.c                            |   2
-rw-r--r--  fs/exofs/ore_raid.c                       |   2
-rw-r--r--  fs/fs-writeback.c                         | 102
-rw-r--r--  fs/gfs2/lops.c                            |   2
-rw-r--r--  fs/jfs/jfs_logmgr.c                       |   2
-rw-r--r--  fs/logfs/dev_bdev.c                       |   5
-rw-r--r--  include/linux/backing-dev.h               |  16
-rw-r--r--  include/linux/bio.h                       | 115
-rw-r--r--  include/linux/blk_types.h                 |   5
-rw-r--r--  include/linux/blkdev.h                    |  29
-rw-r--r--  include/trace/events/block.h              |  12
-rw-r--r--  include/trace/events/writeback.h          |   5
-rw-r--r--  kernel/relay.c                            |   2
-rw-r--r--  mm/backing-dev.c                          | 259
-rw-r--r--  mm/bounce.c                               |  75
-rw-r--r--  mm/page_io.c                              |   1

50 files changed, 1000 insertions(+), 956 deletions(-)
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index a5eb7d19a65d..9887f0414c16 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -5,7 +5,7 @@ The main aim of CFQ scheduler is to provide a fair allocation of the disk
I/O bandwidth for all the processes which requests an I/O operation.
CFQ maintains the per process queue for the processes which request I/O
-operation(syncronous requests). In case of asynchronous requests, all the
+operation(synchronous requests). In case of asynchronous requests, all the
requests from all the processes are batched together according to their
process's I/O priority.
@@ -66,6 +66,47 @@ This parameter is used to set the timeout of synchronous requests. Default
value of this is 124ms. In case to favor synchronous requests over asynchronous
one, this value should be decreased relative to fifo_expire_async.
+group_idle
+-----------
+This parameter forces idling at the CFQ group level instead of CFQ
+queue level. This was introduced after a bottleneck was observed in
+higher end storage due to idling on a sequential queue while allowing
+dispatch from only a single queue. The idea is that it can be run with
+slice_idle=0 and group_idle=8, so that idling does not happen on individual
+queues in the group but happens overall on the group and thus still keeps the
+IO controller working.
+Not idling on individual queues in the group will dispatch requests from
+multiple queues in the group at the same time and achieve higher throughput
+on higher end storage.
+
+Default value for this parameter is 8ms.
+
+latency
+-------
+This parameter is used to enable/disable the latency mode of the CFQ
+scheduler. If latency mode (called low_latency) is enabled, CFQ tries
+to recompute the slice time for each process based on the target_latency set
+for the system. This favors fairness over throughput. Disabling low
+latency (setting it to 0) ignores target latency, allowing each process in the
+system to get a full time slice.
+
+By default low latency mode is enabled.
+
+target_latency
+--------------
+This parameter is used to calculate the time slice for a process if cfq's
+latency mode is enabled. It will ensure that sync requests have an estimated
+latency. But if the sequential workload is high (e.g. sequential reads),
+then to meet the latency constraints, throughput may decrease because of less
+time for each process to issue I/O request before the cfq queue is switched.
+
+Though this can be overcome by disabling the latency_mode, it may increase
+the read latency for some applications. This parameter allows for changing
+target_latency through the sysfs interface, which makes it possible to
+balance throughput against read latency.
+
+Default value for target_latency is 300ms.
+
slice_async
-----------
This parameter is same as of slice_sync but for asynchronous queue. The
@@ -98,8 +139,8 @@ in the device exceeds this parameter. This parameter is used for synchronous
request.
In case of storage with several disk, this setting can limit the parallel
-processing of request. Therefore, increasing the value can imporve the
-performace although this can cause the latency of some I/O to increase due
+processing of request. Therefore, increasing the value can improve the
+performance although this can cause the latency of some I/O to increase due
to more number of requests.
CFQ Group scheduling
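
All of the tunables documented above are exposed under /sys/block/<dev>/queue/iosched/ while cfq is the active scheduler. As a minimal userspace sketch (not part of this patch) of the group-idle configuration the text describes — the device name "sda" is an assumption:

#include <stdio.h>

/* Write one value to a cfq tunable; returns 0 on success. */
static int write_tunable(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Idle at the group level only, per the group_idle section above. */
	write_tunable("/sys/block/sda/queue/iosched/slice_idle", "0");
	write_tunable("/sys/block/sda/queue/iosched/group_idle", "8");
	/* 300 ms is the documented target_latency default. */
	write_tunable("/sys/block/sda/queue/iosched/target_latency", "300");
	return 0;
}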
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b2b9837f9dd3..e8918ffaf96d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request_queue *q,
if (!new_blkg)
return -ENOMEM;
- preloaded = !radix_tree_preload(GFP_KERNEL);
-
blk_queue_bypass_start(q);
+ preloaded = !radix_tree_preload(GFP_KERNEL);
+
/*
* Make sure the root blkg exists and count the existing blkgs. As
* @q is bypassing at this point, blkg_lookup_create() can't be
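
The bug this hunk fixes: radix_tree_preload(GFP_KERNEL) returns with preemption disabled on success, while blk_queue_bypass_start() can sleep (it waits for an RCU grace period), so doing the preload first is what produced the "scheduling while atomic" splat. A sketch of the corrected ordering with the policy-specific body elided — the helper name is hypothetical:

#include <linux/blkdev.h>
#include <linux/radix-tree.h>

/* Hypothetical helper showing only the ordering constraint. */
static int preload_after_bypass(struct request_queue *q)
{
	int preloaded;

	blk_queue_bypass_start(q);	/* may sleep: must come first */
	preloaded = !radix_tree_preload(GFP_KERNEL); /* preemption now off */

	/* ... radix tree insertions under the queue lock ... */

	if (preloaded)
		radix_tree_preload_end();	/* preemption back on */
	blk_queue_bypass_end(q);
	return 0;
}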
diff --git a/block/blk-core.c b/block/blk-core.c
index 7c288358a745..33c33bc99ddd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -30,6 +30,7 @@
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -159,20 +160,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
- if (unlikely(nbytes > bio->bi_size)) {
- printk(KERN_ERR "%s: want %u bytes done, %u left\n",
- __func__, nbytes, bio->bi_size);
- nbytes = bio->bi_size;
- }
-
if (unlikely(rq->cmd_flags & REQ_QUIET))
set_bit(BIO_QUIET, &bio->bi_flags);
- bio->bi_size -= nbytes;
- bio->bi_sector += (nbytes >> 9);
-
- if (bio_integrity(bio))
- bio_integrity_advance(bio, nbytes);
+ bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
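
req_bio_endio() now delegates to bio_advance(), added in fs/bio.c by this series. Judging from the lines removed above, the helper consolidates roughly the following — a sketch, not the real implementation, which additionally advances the bvec bookkeeping:

#include <linux/bio.h>

/* Sketch of what the removed open-coded lines became. */
static void bio_advance_sketch(struct bio *bio, unsigned int nbytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	bio->bi_sector += nbytes >> 9;	/* bytes to 512-byte sectors */
	bio->bi_size -= nbytes;
}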
@@ -1264,6 +1255,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+ if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+ pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
/*
* queue lock must be held
*/
@@ -1274,6 +1275,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(--req->ref_count))
return;
+ blk_pm_put_request(req);
+
elv_completed_request(q, req);
/* this is a bio leak */
@@ -1597,7 +1600,7 @@ static void handle_bad_sector(struct bio *bio)
printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
bio->bi_rw,
- (unsigned long long)bio->bi_sector + bio_sectors(bio),
+ (unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
set_bit(BIO_EOF, &bio->bi_flags);
@@ -2053,6 +2056,28 @@ static void blk_account_io_done(struct request *req)
}
}
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests when queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+ struct request *rq)
+{
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+ return NULL;
+ else
+ return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+ struct request *rq)
+{
+ return rq;
+}
+#endif
+
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
@@ -2075,6 +2100,11 @@ struct request *blk_peek_request(struct request_queue *q)
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
+
+ rq = blk_pm_peek_request(q, rq);
+ if (!rq)
+ break;
+
if (!(rq->cmd_flags & REQ_STARTED)) {
/*
* This is the first time the device driver
@@ -2253,8 +2283,7 @@ EXPORT_SYMBOL(blk_fetch_request);
**/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
- int total_bytes, bio_nbytes, next_idx = 0;
- struct bio *bio;
+ int total_bytes;
if (!req->bio)
return false;
@@ -2300,56 +2329,21 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
blk_account_io_completion(req, nr_bytes);
- total_bytes = bio_nbytes = 0;
- while ((bio = req->bio) != NULL) {
- int nbytes;
+ total_bytes = 0;
+ while (req->bio) {
+ struct bio *bio = req->bio;
+ unsigned bio_bytes = min(bio->bi_size, nr_bytes);
- if (nr_bytes >= bio->bi_size) {
+ if (bio_bytes == bio->bi_size)
req->bio = bio->bi_next;
- nbytes = bio->bi_size;
- req_bio_endio(req, bio, nbytes, error);
- next_idx = 0;
- bio_nbytes = 0;
- } else {
- int idx = bio->bi_idx + next_idx;
- if (unlikely(idx >= bio->bi_vcnt)) {
- blk_dump_rq_flags(req, "__end_that");
- printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
- __func__, idx, bio->bi_vcnt);
- break;
- }
+ req_bio_endio(req, bio, bio_bytes, error);
- nbytes = bio_iovec_idx(bio, idx)->bv_len;
- BIO_BUG_ON(nbytes > bio->bi_size);
+ total_bytes += bio_bytes;
+ nr_bytes -= bio_bytes;
- /*
- * not a complete bvec done
- */
- if (unlikely(nbytes > nr_bytes)) {
- bio_nbytes += nr_bytes;
- total_bytes += nr_bytes;
- break;
- }
-
- /*
- * advance to the next vector
- */
- next_idx++;
- bio_nbytes += nbytes;
- }
-
- total_bytes += nbytes;
- nr_bytes -= nbytes;
-
- bio = req->bio;
- if (bio) {
- /*
- * end more in this run, or just return 'not-done'
- */
- if (unlikely(nr_bytes <= 0))
- break;
- }
+ if (!nr_bytes)
+ break;
}
/*
@@ -2365,16 +2359,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
return false;
}
- /*
- * if the request wasn't completed, update state
- */
- if (bio_nbytes) {
- req_bio_endio(req, bio, bio_nbytes, error);
- bio->bi_idx += next_idx;
- bio_iovec(bio)->bv_offset += nr_bytes;
- bio_iovec(bio)->bv_len -= nr_bytes;
- }
-
req->__data_len -= total_bytes;
req->buffer = bio_data(req->bio);
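
The rewritten completion loop is simple enough to model on its own: complete min(bi_size, nr_bytes) of the head bio, pop the bio only when it is fully consumed, and stop once nr_bytes runs out. A toy userspace model (struct and names invented for illustration):

/* Toy stand-in for a request's bio chain. */
struct toy_bio {
	unsigned int bi_size;
	struct toy_bio *bi_next;
};

/* Mirrors the new blk_update_request() loop: returns the total bytes
 * completed across the chain. */
static unsigned int complete_bytes(struct toy_bio **head, unsigned int nr_bytes)
{
	unsigned int total = 0;

	while (*head) {
		struct toy_bio *bio = *head;
		unsigned int bio_bytes =
			bio->bi_size < nr_bytes ? bio->bi_size : nr_bytes;

		if (bio_bytes == bio->bi_size)
			*head = bio->bi_next;	   /* fully done: pop it */
		else
			bio->bi_size -= bio_bytes; /* partial: advance in place */

		total += bio_bytes;
		nr_bytes -= bio_bytes;
		if (!nr_bytes)
			break;
	}
	return total;
}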
@@ -3046,6 +3030,149 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ * Initialize runtime-PM-related fields for @q and start auto suspend for
+ * @dev. Drivers that want to take advantage of request-based runtime PM
+ * should call this function after @dev has been initialized, and its
+ * request queue @q has been allocated, and runtime PM for it can not happen
+ * yet (either due to disabled/forbidden or its usage_count > 0). In most
+ * cases, driver should call this function before any I/O has taken place.
+ *
+ * This function takes care of setting up using auto suspend for the device,
+ * the autosuspend delay is set to -1 to make runtime suspend impossible
+ * until an updated value is either set by user or by driver. Drivers do
+ * not need to touch other autosuspend settings.
+ *
+ * The block layer runtime PM is request based, so it only works for drivers
+ * that use requests as their IO unit, not for those that use bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+ pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ * This function will check if runtime suspend is allowed for the device
+ * by examining if there are any requests pending in the queue. If there
+ * are requests pending, the device can not be runtime suspended; otherwise,
+ * the queue's status will be updated to SUSPENDING and the driver can
+ * proceed to suspend the device.
+ *
+ * For the not allowed case, we mark last busy for the device so that
+ * runtime PM core will try to autosuspend it some time later.
+ *
+ * This function should be called near the start of the device's
+ * runtime_suspend callback.
+ *
+ * Return:
+ * 0 - OK to runtime suspend the device
+ * -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ int ret = 0;
+
+ spin_lock_irq(q->queue_lock);
+ if (q->nr_pending) {
+ ret = -EBUSY;
+ pm_runtime_mark_last_busy(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDING;
+ }
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime suspend function and mark last busy for the device so
+ * that PM core will try to auto suspend the device at a later time.
+ *
+ * This function should be called near the end of the device's
+ * runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_SUSPENDED;
+ } else {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ }
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ * Update the queue's runtime status to RESUMING in preparation for the
+ * runtime resume of the device.
+ *
+ * This function should be called near the start of the device's
+ * runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_RESUMING;
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_resume function. If it is successfully resumed, process
+ * the requests that are queued into the device's queue when it is resuming
+ * and then mark last busy and initiate autosuspend for it.
+ *
+ * This function should be called near the end of the device's
+ * runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_ACTIVE;
+ __blk_run_queue(q);
+ pm_runtime_mark_last_busy(q->dev);
+ pm_runtime_autosuspend(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDED;
+ }
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
+
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
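
Tying the new exports together: a driver calls blk_pm_runtime_init() at probe time and brackets its runtime PM callbacks with the pre/post helpers. A hedged sketch of a hypothetical request-based driver — the mydrv_*/mydev_* names and stubs are invented for illustration, not part of this patch:

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

/* Stand-ins for hardware-specific suspend/resume work (assumptions). */
static int mydev_quiesce(struct device *dev) { return 0; }
static int mydev_wake(struct device *dev) { return 0; }

static int mydrv_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;			/* -EBUSY: requests pending */

	err = mydev_quiesce(dev);
	blk_post_runtime_suspend(q, err);	/* RPM_SUSPENDED on success */
	return err;
}

static int mydrv_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(q);
	err = mydev_wake(dev);
	blk_post_runtime_resume(q, err);	/* reruns the queue on success */
	return err;
}

At probe the driver would pair this with blk_pm_runtime_init(q, dev); since the autosuspend delay starts at -1, a later pm_runtime_set_autosuspend_delay() (from the driver or via sysfs) is needed before the device can actually suspend.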
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4f0ade74cfd0..d5cd3131c57a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2270,11 +2270,8 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
return NULL;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
- if (cfqq) {
- sector_t sector = bio->bi_sector + bio_sectors(bio);
-
- return elv_rb_find(&cfqq->sort_list, sector);
- }
+ if (cfqq)
+ return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
return NULL;
}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 90037b5eb17f..ba19a3afab79 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -132,7 +132,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
* check for front merge
*/
if (dd->front_merges) {
- sector_t sector = bio->bi_sector + bio_sectors(bio);
+ sector_t sector = bio_end_sector(bio);
__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
if (__rq) {
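
This hunk and the cfq one above (plus brd and pktcdvd below) convert open-coded end-sector arithmetic to bio_end_sector(), added to include/linux/bio.h by this series. From the replacements, the helper is equivalent to:

/* What the conversions expand to. */
#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))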
diff --git a/block/elevator.c b/block/elevator.c
index a0ffdd943c98..eba5b04c29b1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,6 +34,7 @@
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <trace/events/block.h>
@@ -536,6 +537,27 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_requeue_request(struct request *rq)
+{
+ if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+ rq->q->nr_pending--;
+}
+
+static void blk_pm_add_request(struct request_queue *q, struct request *rq)
+{
+ if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+ (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+}
+#else
+static inline void blk_pm_requeue_request(struct request *rq) {}
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+}
+#endif
+
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
@@ -550,6 +572,8 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
rq->cmd_flags &= ~REQ_STARTED;
+ blk_pm_requeue_request(rq);
+
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
@@ -572,6 +596,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
+ blk_pm_add_request(q, rq);
+
rq->q = q;
if (rq->cmd_flags & REQ_SOFTBARRIER) {
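
The accounting these hooks maintain, together with blk_pm_put_request() in blk-core.c above: the 0 -> 1 transition of nr_pending on a suspended or suspending queue requests a resume, and the drop back to 0 marks the device idle so the PM core can autosuspend it later. A toy userspace model with invented names:

#include <stdbool.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDING, RPM_SUSPENDED };

struct model_queue {
	enum rpm_status rpm_status;
	int nr_pending;
};

/* Mirrors blk_pm_add_request(): only the first non-PM request entering
 * a suspended/suspending queue triggers a resume. */
static void model_add_request(struct model_queue *q, bool pm_request)
{
	if (!pm_request && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED ||
	     q->rpm_status == RPM_SUSPENDING))
		printf("pm_request_resume()\n");
}

/* Mirrors blk_pm_put_request(): the count hitting zero is the point at
 * which the device becomes eligible for autosuspend. */
static void model_put_request(struct model_queue *q, bool pm_request)
{
	if (!pm_request && !--q->nr_pending)
		printf("pm_runtime_mark_last_busy()\n");
}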
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index ff5804e2f1d2..c85fc895ecdb 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -238,7 +238,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
return NULL;
- pte = kzalloc(count, GFP_KERNEL);
+ pte = kmalloc(count, GFP_KERNEL);
if (!pte)
return NULL;
@@ -267,7 +267,7 @@ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
gpt_header *gpt;
unsigned ssz = bdev_logical_block_size(state->bdev);
- gpt = kzalloc(ssz, GFP_KERNEL);
+ gpt = kmalloc(ssz, GFP_KERNEL);
if (!gpt)
return NULL;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 92b6d7c51e39..5efed089a702 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -928,7 +928,7 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
buf->resid = bio->bi_size;
buf->sector = bio->bi_sector;
bio_pageinc(bio);
- buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
+ buf->bv = bv = bio_iovec(bio);
buf->bv_resid = bv->bv_len;
WARN_ON(buf->bv_resid == 0);
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 531ceb31d0ff..f1a29f8e9d33 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -334,8 +334,7 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
int err = -EIO;
sector = bio->bi_sector;
- if (sector + (bio->bi_size >> SECTOR_SHIFT) >
- get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c49e85608101..04ceb7e2fadd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3775,7 +3775,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_idx = 0;
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9f2d348f7115..3c08983e600a 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -901,7 +901,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
pd->iosched.successive_reads += bio->bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+ pd->iosched.last_write = bio_end_sector(bio);
}
if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
if (pd->read_speed == pd->write_speed) {
@@ -948,31 +948,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
}
/*
- * Copy CD_FRAMESIZE bytes from src_bio into a destination page
- */
-static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
-{
- unsigned int copy_size = CD_FRAMESIZE;
-
- while (copy_size > 0) {
- struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
- void *vfrom = kmap_atomic(src_bvl->bv_page) +
- src_bvl->bv_offset + offs;
- void *vto = page_address(dst_page) + dst_offs;
- int len = min_t(int, copy_size, src_bvl->bv_len - offs);
-
- BUG_ON(len < 0);
- memcpy(vto, vfrom, len);
- kunmap_atomic(vfrom);
-
- seg++;
- offs = 0;
- dst_offs += len;
- copy_size -= len;
- }
-}
-
-/*
* Copy all data for this packet to pkt->pages[], so that
* a) The number of required segments for the write bio is minimized, which
* is necessary for some scsi controllers.
@@ -1181,16 +1156,15 @@ static int pkt_start_recovery(struct packet_data *pkt)
new_sector = new_block * (CD_FRAMESIZE >> 9);
pkt->sector = new_sector;
+ bio_reset(pkt->bio);
+ pkt->bio->bi_bdev = pd->bdev;
+ pkt->bio->bi_rw = REQ_WRITE;
pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_next = NULL;
- pkt->bio->bi_flags = 1 << BIO_UPTODATE;
- pkt->bio->bi_idx = 0;
+ pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_vcnt = pkt->frames;
- BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
- BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
- BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
- BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
- BUG_ON(pkt->bio->bi_private != pkt);
+ pkt->bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->bio->bi_private = pkt;
drop_super(sb);
return 1;
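
bio_reset(), also from this series' bio work, wipes a bio back to its just-allocated state while keeping the allocation itself, which is why every field the rewrite needs is reassigned above. A sketch of the reuse pattern — the helper name is invented; CD_FRAMESIZE (2048) comes from <linux/cdrom.h>:

#include <linux/bio.h>
#include <linux/cdrom.h>

/* Hypothetical helper mirroring the recovery path above: recycle an
 * existing bio for a rewrite at a new sector. */
static void reuse_bio_for_rewrite(struct bio *bio, struct block_device *bdev,
				  sector_t new_sector, int frames,
				  bio_end_io_t *end_io, void *private)
{
	bio_reset(bio);				/* back to a pristine state */
	bio->bi_bdev = bdev;
	bio->bi_rw = REQ_WRITE;
	bio->bi_sector = new_sector;
	bio->bi_size = frames * CD_FRAMESIZE;
	bio->bi_vcnt = frames;
	bio->bi_end_io = end_io;
	bio->bi_private = private;
}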
@@ -1325,55 +1299,35 @@ try_next_bio:
*/
static void pkt_start_write(struct pktcdvd_device