Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c    14
-rw-r--r--  block/blk-core.c       80
-rw-r--r--  block/blk-map.c        21
-rw-r--r--  block/blk-merge.c       6
-rw-r--r--  block/blk-settings.c   23
-rw-r--r--  block/blk-sysfs.c      26
-rw-r--r--  block/blk-tag.c        16
-rw-r--r--  block/blk.h             1
-rw-r--r--  block/bsg.c             2
-rw-r--r--  block/elevator.c       23
-rw-r--r--  block/genhd.c           8
-rw-r--r--  block/scsi_ioctl.c      3
12 files changed, 133 insertions(+), 90 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 55c5f1fc4f1f..a09ead19f9c5 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
prepare_flush_fn == NULL) {
- printk(KERN_ERR "%s: prepare_flush_fn required\n",
- __FUNCTION__);
+ printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
return -EINVAL;
}
@@ -53,7 +52,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
/*
* Cache flushing for ordered writes handling
*/
-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_ordered_cur_seq(struct request_queue *q)
{
if (!q->ordseq)
return 0;
@@ -143,10 +142,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
end_io = post_flush_end_io;
}
+ blk_rq_init(q, rq);
rq->cmd_flags = REQ_HARDBARRIER;
- rq_init(q, rq);
- rq->elevator_private = NULL;
- rq->elevator_private2 = NULL;
rq->rq_disk = q->bar_rq.rq_disk;
rq->end_io = end_io;
q->prepare_flush_fn(q, rq);
@@ -167,14 +164,11 @@ static inline struct request *start_ordered(struct request_queue *q,
blkdev_dequeue_request(rq);
q->orig_bar_rq = rq;
rq = &q->bar_rq;
- rq->cmd_flags = 0;
- rq_init(q, rq);
+ blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
if (q->ordered & QUEUE_ORDERED_FUA)
rq->cmd_flags |= REQ_FUA;
- rq->elevator_private = NULL;
- rq->elevator_private2 = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io;
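
The two blk-barrier.c hunks above replace the old "rq_init() plus clear stray
fields by hand" sequence with a single blk_rq_init() call. A minimal sketch of
the resulting pattern for code reusing a preallocated request; the function
name and end_io hook are hypothetical, not part of this patch:

    static void issue_barrier(struct request_queue *q, struct request *rq,
                              rq_end_io_fn *done)
    {
            blk_rq_init(q, rq);             /* memset + re-init; see blk-core.c below */
            rq->cmd_flags = REQ_HARDBARRIER;
            rq->end_io = done;              /* hypothetical completion hook */
            elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
    }
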
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a438a93f723..b754a4a2f9bd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
{
+ memset(rq, 0, sizeof(*rq));
+
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
- rq->nr_sectors = rq->hard_nr_sectors = 0;
- rq->current_nr_sectors = rq->hard_cur_sectors = 0;
- rq->bio = rq->biotail = NULL;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
- rq->rq_disk = NULL;
- rq->nr_phys_segments = 0;
- rq->nr_hw_segments = 0;
- rq->ioprio = 0;
- rq->special = NULL;
- rq->buffer = NULL;
+ rq->cmd = rq->__cmd;
rq->tag = -1;
- rq->errors = 0;
rq->ref_count = 1;
- rq->cmd_len = 0;
- memset(rq->cmd, 0, sizeof(rq->cmd));
- rq->data_len = 0;
- rq->extra_len = 0;
- rq->sense_len = 0;
- rq->data = NULL;
- rq->sense = NULL;
- rq->end_io = NULL;
- rq->end_io_data = NULL;
- rq->next_rq = NULL;
}
+EXPORT_SYMBOL(blk_rq_init);
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
@@ -156,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
if (unlikely(nbytes > bio->bi_size)) {
printk(KERN_ERR "%s: want %u bytes done, %u left\n",
- __FUNCTION__, nbytes, bio->bi_size);
+ __func__, nbytes, bio->bi_size);
nbytes = bio->bi_size;
}
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
- for (bit = 0; bit < sizeof(rq->cmd); bit++)
+ for (bit = 0; bit < BLK_MAX_CDB; bit++)
printk("%02x ", rq->cmd[bit]);
printk("\n");
}
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
if (blk_queue_stopped(q))
return;
- if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+ if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+ __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
- if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
return 0;
+ queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
del_timer(&q->unplug_timer);
return 1;
}
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
- clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
- if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
- clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
- set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
@@ -395,11 +378,8 @@ EXPORT_SYMBOL(blk_sync_queue);
* blk_run_queue - run a single device queue
* @q: The queue to run
*/
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
blk_remove_plug(q);
/*
@@ -407,15 +387,28 @@ void blk_run_queue(struct request_queue *q)
* handling reinvoke the handler shortly if we already got there.
*/
if (!elv_queue_empty(q)) {
- if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
- clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
}
}
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+ unsigned long flags;
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
void blk_cleanup_queue(struct request_queue *q)
{
mutex_lock(&q->sysfs_lock);
- set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+ queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
if (!rq)
return NULL;
+ blk_rq_init(q, rq);
+
/*
* first three bits are identical in rq->cmd_flags and bio->bi_rw,
* see bio.h and blkdev.h
@@ -789,8 +784,6 @@ rq_starved:
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
- rq_init(q, rq);
-
blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
return rq;
@@ -1573,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
blk_dump_rq_flags(req, "__end_that");
printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
- __FUNCTION__, bio->bi_idx,
- bio->bi_vcnt);
+ __func__, bio->bi_idx, bio->bi_vcnt);
break;
}
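
The blk-core.c changes do two related things: rq_init() becomes the exported,
memset()-based blk_rq_init(), and the open-coded atomic bitops on
q->queue_flags are converted to queue_flag_set()/queue_flag_clear(). The
assumed contract (the helpers live in blkdev.h) is that the plain variants are
non-atomic and require queue_lock, while the *_unlocked variants keep atomic
bitops for paths without concurrent access; a sketch:

    unsigned long flags;

    spin_lock_irqsave(q->queue_lock, flags);
    queue_flag_set(QUEUE_FLAG_STOPPED, q);  /* lock held: __set_bit() is fine */
    spin_unlock_irqrestore(q->queue_lock, flags);

    queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);  /* atomic set_bit() inside */

The blk_run_queue() split follows the same logic: __blk_run_queue() is for
callers that already hold queue_lock, and blk_run_queue() is now just a
lock-taking wrapper around it.
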
diff --git a/block/blk-map.c b/block/blk-map.c
index 3c942bd6422a..0b1af5a3537c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -255,10 +255,18 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* @kbuf: the kernel buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+ * buffer is used.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
+ unsigned long kaddr;
+ unsigned int alignment;
+ int reading = rq_data_dir(rq) == READ;
+ int do_copy = 0;
struct bio *bio;
if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (!len || !kbuf)
return -EINVAL;
- bio = bio_map_kern(q, kbuf, len, gfp_mask);
+ kaddr = (unsigned long)kbuf;
+ alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ do_copy = ((kaddr & alignment) || (len & alignment));
+
+ if (do_copy)
+ bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ else
+ bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
if (IS_ERR(bio))
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
+ if (do_copy)
+ rq->cmd_flags |= REQ_COPY_USER;
+
blk_rq_bio_prep(q, rq, bio);
blk_queue_bounce(q, &rq->bio);
rq->buffer = rq->data = NULL;
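
The new bounce decision in blk_rq_map_kern() is pure arithmetic. A worked
example with illustrative values (the alignment constants are assumptions,
not taken from the patch):

    unsigned long kaddr = (unsigned long)kbuf;  /* say ...45003: misaligned */
    unsigned int alignment = 511 | 0;   /* queue_dma_alignment(q) | dma_pad_mask */
    int do_copy = (kaddr & alignment) || (len & alignment);
    /* ...003 & 0x1ff != 0, so bio_copy_kern() bounces the buffer and
     * REQ_COPY_USER marks the request for copy-back on completion. */
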
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b5c5c4a9e3f0..73b23562af20 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
if (!rq->bio)
return;
- cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+ cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
- if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+ if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
return 0;
if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nsegs, cluster;
nsegs = 0;
- cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+ cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
/*
* for each bio in rq
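
The blk-merge.c hunks are a mechanical idiom change. Note the two forms differ
in return value, not in truth value:

    cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);    /* old: 0 or 1<<N */
    cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); /* new: 0 or 1 */

Both results are only ever used in boolean context here, so behavior is
unchanged.
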
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5713f7e5cbd2..bb93d4c32775 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
/**
* blk_queue_prep_rq - set a prepare_request function for queue
@@ -169,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_sectors);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_sectors);
}
if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -197,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
{
if (!max_segments) {
max_segments = 1;
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_segments);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
}
q->max_phys_segments = max_segments;
@@ -221,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
{
if (!max_segments) {
max_segments = 1;
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_segments);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
}
q->max_hw_segments = max_segments;
@@ -242,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_size);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_size);
}
q->max_segment_size = max_size;
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
- clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
}
EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -358,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
- printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
- mask);
+ printk(KERN_INFO "%s: set to minimum %lx\n",
+ __func__, mask);
}
q->seg_boundary_mask = mask;
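
Besides un-exporting blk_max_pfn and converting the stacked-limits CLUSTER
clear to queue_flag_clear(), this file gets the same printk sweep as the rest
of the series: the gcc-specific __FUNCTION__ gives way to __func__,
standardized by C99. For reference, inside blk_queue_max_sectors() the
identifier behaves as if declared:

    /* implicit, per C99 6.4.2.2: */
    static const char __func__[] = "blk_queue_max_sectors";
    /* __FUNCTION__ was the pre-C99 gcc spelling of the same thing */
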
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22b..e85c4013e8a2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ if (nm)
+ set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+ else
+ clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.show = queue_hw_sector_size_show,
};
+static struct queue_sysfs_entry queue_nomerges_entry = {
+ .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nomerges_show,
+ .store = queue_nomerges_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
&queue_max_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
+ &queue_nomerges_entry.attr,
NULL,
};
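
The new attribute gives userspace a switch (write 1 to
/sys/block/<dev>/queue/nomerges) to disable request merging entirely;
elevator.c below short-circuits elv_merge() when it is set. The
blk_queue_nomerges() test used by the show path is presumably a blkdev.h
macro along these lines (assumed, not shown in the patch):

    #define blk_queue_nomerges(q) \
            test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)

The store handler sticks with atomic set_bit()/clear_bit() rather than the
queue_flag_* helpers, consistent with not holding queue_lock in the sysfs
write path.
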
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46ce234..de64e0429977 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
__blk_free_tags(bqt);
q->queue_tags = NULL;
- q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+ queue_flag_clear(QUEUE_FLAG_QUEUED, q);
}
/**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
**/
void blk_queue_free_tags(struct request_queue *q)
{
- clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
if (q && depth > q->nr_requests * 2) {
depth = q->nr_requests * 2;
printk(KERN_ERR "%s: adjusted depth to %d\n",
- __FUNCTION__, depth);
+ __func__, depth);
}
tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
rc = blk_queue_resize_tags(q, depth);
if (rc)
return rc;
- set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_QUEUED, q);
return 0;
} else
atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
* assign it, all done
*/
q->queue_tags = tags;
- q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+ queue_flag_set(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
fail:
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
if (unlikely(bqt->tag_index[tag] == NULL))
printk(KERN_ERR "%s: tag %d is missing\n",
- __FUNCTION__, tag);
+ __func__, tag);
bqt->tag_index[tag] = NULL;
if (unlikely(!test_bit(tag, bqt->tag_map))) {
printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
- __FUNCTION__, tag);
+ __func__, tag);
return;
}
/*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d",
- __FUNCTION__, rq,
+ __func__, rq,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
BUG();
}
diff --git a/block/blk.h b/block/blk.h
index ec9120fb789a..59776ab4742a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -10,7 +10,6 @@
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
-void rq_init(struct request_queue *q, struct request *rq);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
diff --git a/block/bsg.c b/block/bsg.c
index d8b889d2e411..f0b7cd343216 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -57,7 +57,7 @@ enum {
#undef BSG_DEBUG
#ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
diff --git a/block/elevator.c b/block/elevator.c
index 88318c383608..980f8ae147b4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -69,7 +69,7 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
/*
* can we safely merge with this request?
*/
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
if (!rq_mergeable(rq))
return 0;
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
}
}
+ if (blk_queue_nomerges(q))
+ return ELEVATOR_NO_MERGE;
+
/*
* See if our hash lookup can find a potential backmerge.
*/
@@ -647,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
default:
printk(KERN_ERR "%s: bad insertion point %d\n",
- __FUNCTION__, where);
+ __func__, where);
BUG();
}
@@ -805,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
rq->cmd_flags |= REQ_QUIET;
end_queued_request(rq, 0);
} else {
- printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
- ret);
+ printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
}
}
@@ -1070,7 +1072,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
*/
spin_lock_irq(q->queue_lock);
- set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
elv_drain_elevator(q);
@@ -1104,7 +1106,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
* finally exit old elevator and turn off BYPASS.
*/
elevator_exit(old_elevator);
- clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+ spin_unlock_irq(q->queue_lock);
+
return 1;
fail_register:
@@ -1115,7 +1120,11 @@ fail_register:
elevator_exit(e);
q->elevator = old_elevator;
elv_register_queue(q);
- clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+ spin_unlock_irq(q->queue_lock);
+
return 0;
}
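
Three distinct elevator.c changes: elv_rq_merge_ok() loses a pointless
'inline' (it is a global, exported function, as with blk_ordered_cur_seq() in
blk-barrier.c), elv_merge() honors the new nomerges flag, and
elevator_switch() now takes queue_lock around the ELVSWITCH flag updates since
queue_flag_clear() is non-atomic. A sketch of what the nomerges early return
means for a __make_request()-style caller (simplified, assumed surrounding
logic):

    switch (elv_merge(q, &req, bio)) {
    case ELEVATOR_BACK_MERGE:
    case ELEVATOR_FRONT_MERGE:
            /* unreachable while nomerges=1: elv_merge() bailed early */
            break;
    default:
            /* ELEVATOR_NO_MERGE: the bio gets a request of its own */
            break;
    }
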
diff --git a/block/genhd.c b/block/genhd.c
index 00da5219ee37..fda9c7a63c29 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -182,11 +182,17 @@ static int exact_lock(dev_t devt, void *data)
*/
void add_disk(struct gendisk *disk)
{
+ struct backing_dev_info *bdi;
+
disk->flags |= GENHD_FL_UP;
blk_register_region(MKDEV(disk->major, disk->first_minor),
disk->minors, NULL, exact_match, exact_lock, disk);
register_disk(disk);
blk_register_queue(disk);
+
+ bdi = &disk->queue->backing_dev_info;
+ bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
+ sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi");
}
EXPORT_SYMBOL(add_disk);
@@ -194,6 +200,8 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
void unlink_gendisk(struct gendisk *disk)
{
+ sysfs_remove_link(&disk->dev.kobj, "bdi");
+ bdi_unregister(&disk->queue->backing_dev_info);
blk_unregister_queue(disk);
blk_unregister_region(MKDEV(disk->major, disk->first_minor),
disk->minors);
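
add_disk() now registers the queue's backing_dev_info under the disk's dev_t
and links it into the disk's sysfs directory; unlink_gendisk() undoes both in
reverse order before the queue goes away. The resulting layout is presumably
something like this (path shape is an assumption based on
MKDEV(major, first_minor)):

    /* for sda (8:0):
     *   /sys/block/sda/bdi -> <bdi device named "8:0" by bdi_register_dev()>
     * per-bdi writeback state thus becomes discoverable per disk */
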
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index aaf07e413ffd..78199c08ec92 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -216,8 +216,6 @@ EXPORT_SYMBOL_GPL(blk_verify_command);
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, int has_write_perm)
{
- memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
if (blk_verify_command(rq->cmd, has_write_perm))
@@ -530,7 +528,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq->data_len = 0;
rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = cmd;
rq->cmd[4] = data;
rq->cmd_len = 6;
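
Both memset()s are only removable because every request now leaves
blk_get_request() fully zeroed: blk_alloc_request() calls blk_rq_init(), whose
memset(rq, 0, sizeof(*rq)) covers the embedded __cmd[] buffer that rq->cmd
points at, so "ATAPI hates garbage after CDB" is already satisfied. A sketch
of the assumed chain from a caller's perspective (the opcode is illustrative):

    struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
    /* rq->cmd points at the zeroed rq->__cmd[] here */
    rq->cmd[0] = GPCMD_TEST_UNIT_READY;  /* illustrative 6-byte CDB */
    rq->cmd_len = 6;
    rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
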