-rw-r--r--  arch/m68k/emu/nfblock.c          |   3
-rw-r--r--  arch/powerpc/sysdev/axonram.c    |   8
-rw-r--r--  block/blk-core.c                 | 261
-rw-r--r--  block/blk-sysfs.c                |   6
-rw-r--r--  block/elevator.c                 |   2
-rw-r--r--  drivers/block/aoe/aoeblk.c       |  14
-rw-r--r--  drivers/block/brd.c              |   4
-rw-r--r--  drivers/block/drbd/drbd_int.h    |   2
-rw-r--r--  drivers/block/drbd/drbd_req.c    |   8
-rw-r--r--  drivers/block/loop.c             |   5
-rw-r--r--  drivers/block/pktcdvd.c          |  11
-rw-r--r--  drivers/block/ps3vram.c          |   6
-rw-r--r--  drivers/block/umem.c             |   4
-rw-r--r--  drivers/md/dm.c                  |  25
-rw-r--r--  drivers/md/faulty.c              |  14
-rw-r--r--  drivers/md/linear.c              |  17
-rw-r--r--  drivers/md/md.c                  |  12
-rw-r--r--  drivers/md/md.h                  |   2
-rw-r--r--  drivers/md/multipath.c           |   8
-rw-r--r--  drivers/md/raid0.c               |  22
-rw-r--r--  drivers/md/raid1.c               |   8
-rw-r--r--  drivers/md/raid10.c              |  19
-rw-r--r--  drivers/md/raid5.c               |   8
-rw-r--r--  drivers/s390/block/dcssblk.c     |   7
-rw-r--r--  drivers/s390/block/xpram.c       |   5
-rw-r--r--  drivers/staging/zram/zram_drv.c  |   8
-rw-r--r--  include/linux/blkdev.h           |  26
27 files changed, 232 insertions, 283 deletions
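
Every hunk below applies the same mechanical change: the ->make_request_fn hook stops returning an int and becomes void, so a driver can no longer return a non-zero value to ask generic_make_request() to resubmit a remapped bio on its behalf. A minimal before/after sketch of the pattern, modelled on the nfblock and brd conversions in this series (the driver name and body are illustrative, not taken from the patch):

	/* old convention: return 0 when the bio was consumed, non-zero to
	 * have generic_make_request() resubmit the (possibly remapped) bio */
	static int example_make_request(struct request_queue *q, struct bio *bio)
	{
		/* ... service the bio ... */
		bio_endio(bio, 0);
		return 0;
	}

	/* new convention: no return value; a driver that remaps the bio must
	 * resubmit it itself via generic_make_request() */
	static void example_make_request(struct request_queue *q, struct bio *bio)
	{
		/* ... service the bio ... */
		bio_endio(bio, 0);
	}
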
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8c1c7e..e3011338ab40 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
struct gendisk *disk;
};
-static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
{
struct nfhd_device *dev = queue->queuedata;
struct bio_vec *bvec;
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
sec += len;
}
bio_endio(bio, 0);
- return 0;
}
static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f09395a..ba4271919062 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
* axon_ram_make_request - make_request() method for block device
* @queue, @bio: see blk_queue_make_request()
*/
-static int
+static void
axon_ram_make_request(struct request_queue *queue, struct bio *bio)
{
struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
struct bio_vec *vec;
unsigned int transfered;
unsigned short idx;
- int rc = 0;
phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
bio_for_each_segment(vec, bio, idx) {
if (unlikely(phys_mem + vec->bv_len > phys_end)) {
bio_io_error(bio);
- rc = -ERANGE;
- break;
+ return;
}
user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
transfered += vec->bv_len;
}
bio_endio(bio, 0);
-
- return rc;
}
/**
diff --git a/block/blk-core.c b/block/blk-core.c
index d34433ae7917..79e41a76d96a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,8 +38,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
-static int __make_request(struct request_queue *q, struct bio *bio);
-
/*
* For the allocated request tables
*/
@@ -541,7 +539,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
/*
* This also sets hw/phys segments, boundary and size
*/
- blk_queue_make_request(q, __make_request);
+ blk_queue_make_request(q, blk_queue_bio);
q->sg_reserved_size = INT_MAX;
@@ -1215,7 +1213,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}
-static int __make_request(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
const bool sync = !!(bio->bi_rw & REQ_SYNC);
struct blk_plug *plug;
@@ -1241,7 +1239,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
* any locks.
*/
if (attempt_plug_merge(current, q, bio, &request_count))
- goto out;
+ return;
spin_lock_irq(q->queue_lock);
@@ -1316,9 +1314,8 @@ get_rq:
out_unlock:
spin_unlock_irq(q->queue_lock);
}
-out:
- return 0;
}
+EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
/*
* If bio->bi_dev is a partition, remap the location
@@ -1417,165 +1414,142 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
return 0;
}
-/**
- * generic_make_request - hand a buffer to its device driver for I/O
- * @bio: The bio describing the location in memory and on the device.
- *
- * generic_make_request() is used to make I/O requests of block
- * devices. It is passed a &struct bio, which describes the I/O that needs
- * to be done.
- *
- * generic_make_request() does not return any status. The
- * success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the bio->bi_end_io
- * function described (one day) else where.
- *
- * The caller of generic_make_request must make sure that bi_io_vec
- * are set to describe the memory buffer, and that bi_dev and bi_sector are
- * set to describe the device address, and the
- * bi_end_io and optionally bi_private are set to describe how
- * completion notification should be signaled.
- *
- * generic_make_request and the drivers it calls may use bi_next if this
- * bio happens to be merged with someone else, and may change bi_dev and
- * bi_sector for remaps as it sees fit. So the values of these fields
- * should NOT be depended on after the call to generic_make_request.
- */
-static inline void __generic_make_request(struct bio *bio)
+static noinline_for_stack bool
+generic_make_request_checks(struct bio *bio)
{
struct request_queue *q;
- sector_t old_sector;
- int ret, nr_sectors = bio_sectors(bio);
- dev_t old_dev;
+ int nr_sectors = bio_sectors(bio);
int err = -EIO;
+ char b[BDEVNAME_SIZE];
+ struct hd_struct *part;
might_sleep();
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- /*
- * Resolve the mapping until finished. (drivers are
- * still free to implement/resolve their own stacking
- * by explicitly returning 0)
- *
- * NOTE: we don't repeat the blk_size check for each new device.
- * Stacking drivers are expected to know what they are doing.
- */
- old_sector = -1;
- old_dev = 0;
- do {
- char b[BDEVNAME_SIZE];
- struct hd_struct *part;
-
- q = bdev_get_queue(bio->bi_bdev);
- if (unlikely(!q)) {
- printk(KERN_ERR
- "generic_make_request: Trying to access "
- "nonexistent block-device %s (%Lu)\n",
- bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
- goto end_io;
- }
-
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
- nr_sectors > queue_max_hw_sectors(q))) {
- printk(KERN_ERR "bio too big device %s (%u > %u)\n",
- bdevname(bio->bi_bdev, b),
- bio_sectors(bio),
- queue_max_hw_sectors(q));
- goto end_io;
- }
-
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
- goto end_io;
-
- part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
- should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
- goto end_io;
+ q = bdev_get_queue(bio->bi_bdev);
+ if (unlikely(!q)) {
+ printk(KERN_ERR
+ "generic_make_request: Trying to access "
+ "nonexistent block-device %s (%Lu)\n",
+ bdevname(bio->bi_bdev, b),
+ (long long) bio->bi_sector);
+ goto end_io;
+ }
- /*
- * If this device has partitions, remap block n
- * of partition p to block n+start(p) of the disk.
- */
- blk_partition_remap(bio);
+ if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ nr_sectors > queue_max_hw_sectors(q))) {
+ printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+ bdevname(bio->bi_bdev, b),
+ bio_sectors(bio),
+ queue_max_hw_sectors(q));
+ goto end_io;
+ }
- if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
- goto end_io;
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ goto end_io;
- if (old_sector != -1)
- trace_block_bio_remap(q, bio, old_dev, old_sector);
+ part = bio->bi_bdev->bd_part;
+ if (should_fail_request(part, bio->bi_size) ||
+ should_fail_request(&part_to_disk(part)->part0,
+ bio->bi_size))
+ goto end_io;
- old_sector = bio->bi_sector;
- old_dev = bio->bi_bdev->bd_dev;
+ /*
+ * If this device has partitions, remap block n
+ * of partition p to block n+start(p) of the disk.
+ */
+ blk_partition_remap(bio);
- if (bio_check_eod(bio, nr_sectors))
- goto end_io;
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+ goto end_io;
- /*
- * Filter flush bio's early so that make_request based
- * drivers without flush support don't have to worry
- * about them.
- */
- if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
- bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
- if (!nr_sectors) {
- err = 0;
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
- if ((bio->bi_rw & REQ_DISCARD) &&
- (!blk_queue_discard(q) ||
- ((bio->bi_rw & REQ_SECURE) &&
- !blk_queue_secdiscard(q)))) {
- err = -EOPNOTSUPP;
+ /*
+ * Filter flush bio's early so that make_request based
+ * drivers without flush support don't have to worry
+ * about them.
+ */
+ if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+ bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+ if (!nr_sectors) {
+ err = 0;
goto end_io;
}
+ }
- if (blk_throtl_bio(q, &bio))
- goto end_io;
-
- /*
- * If bio = NULL, bio has been throttled and will be submitted
- * later.
- */
- if (!bio)
- break;
+ if ((bio->bi_rw & REQ_DISCARD) &&
+ (!blk_queue_discard(q) ||
+ ((bio->bi_rw & REQ_SECURE) &&
+ !blk_queue_secdiscard(q)))) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
- trace_block_bio_queue(q, bio);
+ if (blk_throtl_bio(q, &bio))
+ goto end_io;
- ret = q->make_request_fn(q, bio);
- } while (ret);
+ /* if bio = NULL, bio has been throttled and will be submitted later. */
+ if (!bio)
+ return false;
- return;
+ trace_block_bio_queue(q, bio);
+ return true;
end_io:
bio_endio(bio, err);
+ return false;
}
-/*
- * We only want one ->make_request_fn to be active at a time,
- * else stack usage with stacked devices could be a problem.
- * So use current->bio_list to keep a list of requests
- * submited by a make_request_fn function.
- * current->bio_list is also used as a flag to say if
- * generic_make_request is currently active in this task or not.
- * If it is NULL, then no make_request is active. If it is non-NULL,
- * then a make_request is active, and new requests should be added
- * at the tail
+/**
+ * generic_make_request - hand a buffer to its device driver for I/O
+ * @bio: The bio describing the location in memory and on the device.
+ *
+ * generic_make_request() is used to make I/O requests of block
+ * devices. It is passed a &struct bio, which describes the I/O that needs
+ * to be done.
+ *
+ * generic_make_request() does not return any status. The
+ * success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the bio->bi_end_io
+ * function described (one day) else where.
+ *
+ * The caller of generic_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffer, and that bi_dev and bi_sector are
+ * set to describe the device address, and the
+ * bi_end_io and optionally bi_private are set to describe how
+ * completion notification should be signaled.
+ *
+ * generic_make_request and the drivers it calls may use bi_next if this
+ * bio happens to be merged with someone else, and may resubmit the bio to
+ * a lower device by calling into generic_make_request recursively, which
+ * means the bio should NOT be touched after the call to ->make_request_fn.
*/
void generic_make_request(struct bio *bio)
{
struct bio_list bio_list_on_stack;
+ if (!generic_make_request_checks(bio))
+ return;
+
+ /*
+ * We only want one ->make_request_fn to be active at a time, else
+ * stack usage with stacked devices could be a problem. So use
+ * current->bio_list to keep a list of requests submited by a
+ * make_request_fn function. current->bio_list is also used as a
+ * flag to say if generic_make_request is currently active in this
+ * task or not. If it is NULL, then no make_request is active. If
+ * it is non-NULL, then a make_request is active, and new requests
+ * should be added at the tail
+ */
if (current->bio_list) {
- /* make_request is active */
bio_list_add(current->bio_list, bio);
return;
}
+
/* following loop may be a bit non-obvious, and so deserves some
* explanation.
* Before entering the loop, bio->bi_next is NULL (as all callers
@@ -1583,22 +1557,21 @@ void generic_make_request(struct bio *bio)
* We pretend that we have just taken it off a longer list, so
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
- * added. __generic_make_request may indeed add some more bios
+ * added. ->make_request() may indeed add some more bios
* through a recursive call to generic_make_request. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* of the top of the list (no pretending) and so remove it from
- * bio_list, and call into __generic_make_request again.
- *
- * The loop was structured like this to make only one call to
- * __generic_make_request (which is important as it is large and
- * inlined) and to keep the structure simple.
+ * bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack);
current->bio_list = &bio_list_on_stack;
do {
- __generic_make_request(bio);
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+ q->make_request_fn(q, bio);
+
bio = bio_list_pop(current->bio_list);
} while (bio);
current->bio_list = NULL; /* deactivate */
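
With the driver return code gone, a stacking driver that remaps a bio resubmits it explicitly, and the on-stack bio_list above turns that recursive generic_make_request() call into iteration, so stack depth stays bounded. A minimal sketch of a remapping ->make_request_fn under the new convention (the stack_dev structure is hypothetical, loosely modelled on the md/linear changes later in this series):

	struct stack_dev {			/* hypothetical driver state */
		struct block_device *lower_bdev;
		sector_t start_sector;
	};

	static void stack_make_request(struct request_queue *q, struct bio *bio)
	{
		struct stack_dev *dev = q->queuedata;

		/* remap onto the underlying device ... */
		bio->bi_bdev = dev->lower_bdev;
		bio->bi_sector += dev->start_sector;

		/* ... and resubmit; if we were ourselves called from
		 * generic_make_request(), this only queues the bio on
		 * current->bio_list and the outer loop picks it up */
		generic_make_request(bio);
	}
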
@@ -2628,6 +2601,20 @@ EXPORT_SYMBOL(kblockd_schedule_delayed_work);
#define PLUG_MAGIC 0x91827364
+/**
+ * blk_start_plug - initialize blk_plug and track it inside the task_struct
+ * @plug: The &struct blk_plug that needs to be initialized
+ *
+ * Description:
+ * Tracking blk_plug inside the task_struct will help with auto-flushing the
+ * pending I/O should the task end up blocking between blk_start_plug() and
+ * blk_finish_plug(). This is important from a performance perspective, but
+ * also ensures that we don't deadlock. For instance, if the task is blocking
+ * for a memory allocation, memory reclaim could end up wanting to free a
+ * page belonging to that request that is currently residing in our private
+ * plug. By flushing the pending I/O when the process goes to sleep, we avoid
+ * this kind of deadlock.
+ */
void blk_start_plug(struct blk_plug *plug)
{
struct task_struct *tsk = current;
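
The kerneldoc added above documents the existing plugging API rather than changing it; a typical caller looks roughly like this (sketch only, assuming the bios have already been allocated and filled in elsewhere):

	struct blk_plug plug;
	struct bio *bio1, *bio2;	/* assume these are prepared read bios */

	blk_start_plug(&plug);		/* plug is tracked in current->plug */
	submit_bio(READ, bio1);		/* I/O may be held back in the plug */
	submit_bio(READ, bio2);
	blk_finish_plug(&plug);		/* flush whatever is still plugged */

If the task blocks in between, the scheduler flushes the plug automatically, which is exactly the deadlock-avoidance property the new comment describes.
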
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 60fda88c57f0..a8eff5f8b9c5 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -457,11 +457,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
}
/**
- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging of the request queue to be released
+ * blk_release_queue: - release a &struct request_queue when it is no longer needed
+ * @kobj: the kobj belonging to the request queue to be released
*
* Description:
- * blk_cleanup_queue is the pair to blk_init_queue() or
+ * blk_release_queue is the pair to blk_init_queue() or
* blk_queue_make_request(). It should be called when a request queue is
* being released; typically when a block device is being de-registered.
* Currently, its primary task it to free all the &struct request
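
For context, the blk_release_queue() kerneldoc above describes the tail end of a queue lifetime that, for a bio-based driver of this era, looks roughly like this (sketch only, reusing the example_make_request stub from earlier; error handling omitted):

	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);
	blk_queue_make_request(q, example_make_request);
	/* ... add_disk(), serve I/O ... */
	blk_cleanup_queue(q);	/* dropping the last reference ends up in
				 * blk_release_queue(), which frees the queue */
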
diff --git a/block/elevator.c b/block/elevator.c
index a3b64bc71d88..cb332cb7ac6b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -182,7 +182,7 @@ static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
eq->elevator_data = data;
}
-static char chosen_elevator[16];
+static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
{
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 528f6318ded1..167ba0af47f5 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -159,7 +159,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-static int
+static void
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
struct sk_buff_head queue;
@@ -172,25 +172,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
if (bio == NULL) {
printk(KERN_ERR "aoe: bio is NULL\n");
BUG();
- return 0;
+ return;
}
d = bio->bi_bdev->bd_disk->private_data;
if (d == NULL) {
printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
BUG();
bio_endio(bio, -ENXIO);
- return 0;
+ return;
} else if (bio->bi_io_vec == NULL) {
printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
BUG();
bio_endio(bio, -ENXIO);
- return 0;
+ return;
}
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
bio_endio(bio, -ENOMEM);
- return 0;
+ return;
}
memset(buf, 0, sizeof(*buf));
INIT_LIST_HEAD(&buf->bufs);
@@ -211,7 +211,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
bio_endio(bio, -ENXIO);
- return 0;
+ return;
}
list_add_tail(&buf->bufs, &d->bufq);
@@ -222,8 +222,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
spin_unlock_irqrestore(&d->lock, flags);
aoenet_xmit(&queue);
-
- return 0;
}
static int
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index dba1c32e1ddf..d22119d49e53 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -323,7 +323,7 @@ out:
return err;
}
-static int brd_make_request(struct request_queue *q, struct bio *bio)
+static void brd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
@@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
out:
bio_endio(bio, err);
-
- return 0;
}
#ifdef CONFIG_BLK_DEV_XIP
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ef2ceed3be4b..36eee3969a98 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1507,7 +1507,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
extern int proc_details;
/* drbd_req */
-extern int drbd_make_request(struct request_queue *q, struct bio *bio);
+extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3424d675b769..4a0f314086e5 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1073,7 +1073,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
return 0;
}
-int drbd_make_request(struct request_queue *q, struct bio *bio)
+void drbd_make_request(struct request_queue *q, struct bio *bio)
{
unsigned int s_enr, e_enr;
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
@@ -1081,7 +1081,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
bio_endio(bio, -EPERM);
- return 0;
+ return;
}
start_time = jiffies;
@@ -1100,7 +1100,8 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
if (likely(s_enr == e_enr)) {
inc_ap_bio(mdev, 1);
- return drbd_make_request_common(mdev, bio, start_time);
+ drbd_make_request_common(mdev, bio, start_time);
+ return;
}
/* can this bio be split generically?
@@ -1148,7 +1149,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
bio_pair_release(bp);
}
- return 0;
}
/* This is called by bio_add_page(). With this function we reduce
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4720c7ade0ae..157ddcb9d0a5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -514,7 +514,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
return bio_list_pop(&lo->lo_bio_list);
}
-static int loop_make_request(struct request_queue *q, struct bio *old_bio)
+static void loop_make_request(struct request_queue *q, struct bio *old_bio)
{
struct loop_device *lo = q->queuedata;
int rw = bio_rw(old_bio);
@@ -532,12 +532,11 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
loop_add_bio(lo, old_bio);
wake_up(&lo->lo_event);
spin_unlock_irq(&lo->lo_lock);
- return 0;
+ return;
out:
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio);
- return 0;
}
struct switch_request {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e133f094ab08..a63b0a2b7805 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2444,7 +2444,7 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
pkt_bio_finished(pd);
}
-static int pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2473,7 +2473,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio->bi_size >> 9;
pkt_queue_bio(pd, cloned_bio);
- return 0;
+ return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2509,7 +2509,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
pkt_make_request(q, &bp->bio1);
pkt_make_request(q, &bp->bio2);
bio_pair_release(bp);
- return 0;
+ return;
}
}
@@ -2533,7 +2533,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
}
spin_unlock(&pkt->lock);
spin_unlock(&pd->cdrw.active_list_lock);
- return 0;
+ return;
} else {
blocked_bio = 1;
}
@@ -2584,10 +2584,9 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
*/
wake_up(&pd->wqueue);
}
- return 0;
+ return;
end_io:
bio_io_error(bio);
- return 0;
}
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b3bdb8af89cf..7fad7af87eb2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -596,7 +596,7 @@ out:
return next;
}
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
{
struct ps3_system_bus_device *dev = q->queuedata;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -610,13 +610,11 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
spin_unlock_irq(&priv->lock);
if (busy)
- return 0;
+ return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
-
- return 0;
}
static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 031ca720d926..aa2712060bfb 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,7 +513,7 @@ static void process_page(unsigned long data)
}
}
-static int mm_make_request(struct request_queue *q, struct bio *bio)
+static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
@@ -525,7 +525,7 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
card->biotail = &bio->bi_next;
spin_unlock_irq(&card->lock);
- return 0;
+ return;
}
static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 52b39f335bb3..7b986e77b75e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -180,9 +180,6 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- /* For saving the address of __make_request for request based dm */
- make_request_fn *saved_make_request_fn;
-
/* sysfs handle */
struct kobject kobj;
@@ -1391,7 +1388,7 @@ out:
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -1412,19 +1409,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return 0;
+ return;
}
__split_and_process_bio(md, bio);
up_read(&md->io_lock);
- return 0;
-}
-
-static int dm_make_request(struct request_queue *q, struct bio *bio)
-{
- struct mapped_device *md = q->queuedata;
-
- return md->saved_make_request_fn(q, bio); /* call __make_request() */
+ return;
}
static int dm_request_based(struct mapped_device *md)
@@ -1432,14 +1422,14 @@ static int dm_request_based(struct mapped_device *md)
return blk_queue_stackable(md->queue);
}
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
if (dm_request_based(md))
- return dm_make_request(q, bio);
-
- return _dm_request(q, bio);
+ blk_queue_bio(q, bio);
+ else
+ _dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
@@ -2172,7 +2162,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
return 0;
md->queue = q;
- md->saved_make_request_fn = md->queue->make_request_fn;
dm_init_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
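
Exporting blk_queue_bio() lets device mapper drop the saved_make_request_fn indirection: the request-based case now calls the standard request-queue submission path directly, while the bio-based case keeps its own mapping logic. Condensed from the hunk above (names as in the patch):

	static void dm_request(struct request_queue *q, struct bio *bio)
	{
		struct mapped_device *md = q->queuedata;

		if (dm_request_based(md))
			blk_queue_bio(q, bio);	/* standard request-queue path */
		else
			_dm_request(q, bio);	/* bio-based mapping path */
	}
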
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 23078dabb6df..5ef304d4341c 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,7 +169,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(mddev_t *mddev, struct bio *bio)
+static void make_request(mddev_t *mddev, struct bio *bio)
{
conf_t *conf = mddev->private;
int failit = 0;
@@ -181,7 +181,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
* just fail immediately
*/
bio_endio(bio, -EIO);
- return 0;
+ return;
}
if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@ static int make_request(mddev_t *mddev, struct bio *bio)
}
if (failit) {
struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
b->bi_bdev = conf->rdev->bdev;
b->bi_private = bio;
b->bi_end_io = faulty_fail;
- generic_make_request(b);
- return 0;
- } else {
+ bio = b;
+ } else
bio->bi_bdev = conf->rdev->bdev;
- return 1;
- }
+
+ generic_make_request(bio);
}
static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6cd2c313e800..c6ee491d98e7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -264,14 +264,14 @@ static int linear_stop (mddev_t *mddev)
return 0;
}
-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static void linear_make_request (mddev_t *mddev, struct bio *bio)
{
dev_info_t *tmp_dev;
sector_t start_sector;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio)