Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--  block/blk-sysfs.c  107
1 file changed, 55 insertions(+), 52 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5144707f25ea..590d1ef2f961 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
unsigned long nr;
int ret, err;
- if (!q->request_fn && !q->mq_ops)
+ if (!queue_is_mq(q))
return -EINVAL;
ret = queue_var_store(&nr, page, count);
@@ -78,11 +78,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
- if (q->request_fn)
- err = blk_update_nr_requests(q, nr);
- else
- err = blk_mq_update_nr_requests(q, nr);
-
+ err = blk_mq_update_nr_requests(q, nr);
if (err)
return err;
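Note: with the legacy request_fn path gone, any queue that allocates requests is a blk-mq queue, so the request_fn/mq_ops branch collapses into a single blk_mq_update_nr_requests() call. The queue_is_mq() helper is, at this point in the tree, just a test of q->mq_ops — roughly:

    static inline bool queue_is_mq(struct request_queue *q)
    {
            return q->mq_ops;
    }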
@@ -239,10 +235,10 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
return ret;
}
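Note: the s/q->queue_lock/&q->queue_lock/ change follows from queue_lock becoming a spinlock embedded in struct request_queue rather than a pointer (legacy drivers could point it at their own lock). A sketch of just the relevant member, before and after:

    /* before: a pointer, possibly to a driver-owned lock */
    spinlock_t *queue_lock;

    /* after: owned by the queue itself */
    spinlock_t queue_lock;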
@@ -317,14 +313,12 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
- queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
if (nm == 2)
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else if (nm)
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
return ret;
}
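Note: queue_flag_set()/queue_flag_clear() required queue_lock to be held, hence the lock/unlock pair being deleted along with them. Their blk_queue_flag_set()/blk_queue_flag_clear() replacements need no lock; in kernels of this vintage they are plain atomic bitops — roughly:

    void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
    {
            set_bit(flag, &q->queue_flags);
    }

    void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
    {
            clear_bit(flag, &q->queue_flags);
    }

The flag pair is no longer updated atomically as a unit, but each individual bit flip is, which is sufficient for tunables like nomerges and rq_affinity.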
@@ -348,18 +342,16 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
if (val == 2) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 1) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 0) {
- queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}
- spin_unlock_irq(q->queue_lock);
#endif
return ret;
}
@@ -407,7 +399,8 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
unsigned long poll_on;
ssize_t ret;
- if (!q->mq_ops || !q->mq_ops->poll)
+ if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
+ !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
return -EINVAL;
ret = queue_var_store(&poll_on, page, count);
@@ -422,6 +415,26 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
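Note: polling was previously gated on the driver supplying an mq_ops->poll hook. With the multiple queue-map work, a queue is pollable only if its tag set registered a dedicated HCTX_TYPE_POLL map containing at least one hardware queue, which is exactly what the new condition tests. Wrapped as a helper for readability (hypothetical name; the body mirrors the check above):

    static bool queue_supports_poll(struct request_queue *q)
    {
            return q->tag_set && q->tag_set->nr_maps > HCTX_TYPE_POLL &&
                   q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
    }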
+static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+}
+
+static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtou32(page, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+
+ blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+
+ return count;
+}
+
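Note: this adds a new read-write queue attribute exposing q->rq_timeout in milliseconds; the store side rejects zero and malformed input. blk_queue_rq_timeout() is the existing setter from block/blk-settings.c, which simply records the value — a sketch:

    void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
    {
            q->rq_timeout = timeout;
    }

Once the attribute is wired into default_attrs (below), it surfaces as /sys/block/<dev>/queue/io_timeout; e.g. writing 30000 to it sets a 30-second request timeout.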
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
if (!wbt_rq_qos(q))
@@ -460,20 +473,14 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
* ends up either enabling or disabling wbt completely. We can't
* have IO inflight if that happens.
*/
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- } else
- blk_queue_bypass_start(q);
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
wbt_set_min_lat(q, val);
wbt_update_limits(q);
- if (q->mq_ops) {
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
- } else
- blk_queue_bypass_end(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
return count;
}
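Note: flipping wbt on or off must not race with in-flight IO (as the comment above says), so the store path drains the queue first. With blk_queue_bypass_start()/end() gone along with the legacy path, the blk-mq freeze/quiesce pair is used unconditionally: freeze waits out outstanding requests and gates new ones, quiesce additionally ensures no hardware queue is mid-dispatch. The resulting pattern:

    blk_mq_freeze_queue(q);    /* drain in-flight requests, block new ones */
    blk_mq_quiesce_queue(q);   /* ensure no ->queue_rq() is running */
    /* ... change wbt state safely ... */
    blk_mq_unquiesce_queue(q);
    blk_mq_unfreeze_queue(q);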
@@ -696,6 +703,12 @@ static struct queue_sysfs_entry queue_dax_entry = {
.show = queue_dax_show,
};
+static struct queue_sysfs_entry queue_io_timeout_entry = {
+ .attr = {.name = "io_timeout", .mode = 0644 },
+ .show = queue_io_timeout_show,
+ .store = queue_io_timeout_store,
+};
+
static struct queue_sysfs_entry queue_wb_lat_entry = {
.attr = {.name = "wbt_lat_usec", .mode = 0644 },
.show = queue_wb_lat_show,
@@ -745,6 +758,7 @@ static struct attribute *default_attrs[] = {
&queue_dax_entry.attr,
&queue_wb_lat_entry.attr,
&queue_poll_delay_entry.attr,
+ &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
&throtl_sample_time_entry.attr,
#endif
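Note: attributes listed in default_attrs are created under /sys/block/<dev>/queue when the disk registers its queue kobject, so adding queue_io_timeout_entry here is what makes the new file visible. Reads and writes are funneled through blk-sysfs.c's common sysfs_ops handlers, which resolve the entry and call ->show/->store under q->sysfs_lock — an abbreviated sketch of the show side:

    static ssize_t
    queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
    {
            struct queue_sysfs_entry *entry =
                    container_of(attr, struct queue_sysfs_entry, attr);
            struct request_queue *q =
                    container_of(kobj, struct request_queue, kobj);
            ssize_t res;

            if (!entry->show)
                    return -EIO;
            mutex_lock(&q->sysfs_lock);
            res = entry->show(q, page);
            mutex_unlock(&q->sysfs_lock);
            return res;
    }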
@@ -844,24 +858,14 @@ static void __blk_release_queue(struct work_struct *work)
blk_free_queue_stats(q->stats);
- blk_exit_rl(q, &q->root_rl);
-
- if (q->queue_tags)
- __blk_queue_free_tags(q);
-
blk_queue_free_zone_bitmaps(q);
- if (!q->mq_ops) {
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, q->fq->flush_rq);
- blk_free_flush_queue(q->fq);
- } else {
+ if (queue_is_mq(q))
blk_mq_release(q);
- }
blk_trace_shutdown(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_debugfs_unregister(q);
bioset_exit(&q->bio_split);
@@ -906,7 +910,7 @@ int blk_register_queue(struct gendisk *disk)
WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
+ blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
/*
* SCSI probing may synchronously create and destroy a lot of
@@ -918,9 +922,8 @@ int blk_register_queue(struct gendisk *disk)
* request_queues for non-existent devices never get registered.
*/
if (!blk_queue_init_done(q)) {
- queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
percpu_ref_switch_to_percpu(&q->q_usage_counter);
- blk_queue_bypass_end(q);
}
ret = blk_trace_init_sysfs(dev);
@@ -936,7 +939,7 @@ int blk_register_queue(struct gendisk *disk)
goto unlock;
}
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
}
@@ -947,7 +950,7 @@ int blk_register_queue(struct gendisk *disk)
blk_throtl_register_queue(q);
- if (q->request_fn || (q->mq_ops && q->elevator)) {
+ if (q->elevator) {
ret = elv_register_queue(q);
if (ret) {
mutex_unlock(&q->sysfs_lock);
@@ -996,7 +999,7 @@ void blk_unregister_queue(struct gendisk *disk)
* Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs.
*/
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unregister_dev(disk_to_dev(disk), q);
mutex_unlock(&q->sysfs_lock);
@@ -1005,7 +1008,7 @@ void blk_unregister_queue(struct gendisk *disk)
blk_trace_remove_sysfs(disk_to_dev(disk));
mutex_lock(&q->sysfs_lock);
- if (q->request_fn || (q->mq_ops && q->elevator))
+ if (q->elevator)
elv_unregister_queue(q);
mutex_unlock(&q->sysfs_lock);