author     Jens Axboe <axboe@kernel.dk>  2018-10-29 10:23:51 -0600
committer  Jens Axboe <axboe@kernel.dk>  2018-11-07 13:42:32 -0700
commit     a1ce35fa49852db60fc6e268038530be533c5b15 (patch)
tree       7a0bb9576a3f0e31e22f849463eee6cdda26bad5 /block
parent     f382fb0bcef4c37dc049e9f6963e3baf204d815c (diff)
block: remove dead elevator code
This removes a bunch of core and elevator related code. On the core front, we remove anything related to queue running, draining, initialization, plugging, and congestion. We also kill anything related to request allocation, merging, retrieval, and completion.

Remove any checking for single queue IO schedulers, as they no longer exist. This means we can also delete a bunch of code related to request issue, adding, completion, etc - and all the SQ related ops and helpers.

Also kill load_default_modules(), as all that did was provide a way to load the default single queue elevator.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
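For illustration, after this patch the request allocation path collapses to the blk-mq allocator unconditionally. A condensed sketch of the resulting blk_get_request(), reconstructed from the corresponding hunk in blk-core.c below (the type of the second parameter is assumed from the surrounding kernel sources, since the hunk does not show it):

	struct request *blk_get_request(struct request_queue *q, unsigned int op,
					blk_mq_req_flags_t flags)
	{
		struct request *req;

		WARN_ON_ONCE(op & REQ_NOWAIT);
		WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

		/* Only the blk-mq path survives; the legacy get_request()
		 * machinery and its request_list accounting are gone. */
		req = blk_mq_alloc_request(q, op, flags);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);

		return req;
	}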
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c       1
-rw-r--r--  block/blk-core.c       1709
-rw-r--r--  block/blk-exec.c         20
-rw-r--r--  block/blk-ioc.c          33
-rw-r--r--  block/blk-merge.c         5
-rw-r--r--  block/blk-settings.c     36
-rw-r--r--  block/blk-sysfs.c        36
-rw-r--r--  block/blk.h              51
-rw-r--r--  block/elevator.c        377
-rw-r--r--  block/kyber-iosched.c     1
-rw-r--r--  block/mq-deadline.c       1
11 files changed, 52 insertions(+), 2218 deletions(-)
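With blk_init_queue() and blk_init_queue_node() deleted below, any remaining request_fn-style driver has to allocate its queue through blk-mq instead. A minimal sketch of that setup, using the standard blk-mq API of this kernel generation (my_dev, my_queue_rq, and the chosen queue_depth are hypothetical and not part of this patch):

	#include <linux/blk-mq.h>

	static const struct blk_mq_ops my_mq_ops = {
		.queue_rq = my_queue_rq,	/* hypothetical dispatch handler */
	};

	static int my_init_queue(struct my_dev *dev)
	{
		int ret;

		/* One hardware queue with a modest depth; this replaces
		 * the old blk_init_queue(rfn, lock) call. */
		memset(&dev->tag_set, 0, sizeof(dev->tag_set));
		dev->tag_set.ops = &my_mq_ops;
		dev->tag_set.nr_hw_queues = 1;
		dev->tag_set.queue_depth = 128;
		dev->tag_set.numa_node = NUMA_NO_NODE;
		dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

		ret = blk_mq_alloc_tag_set(&dev->tag_set);
		if (ret)
			return ret;

		dev->queue = blk_mq_init_queue(&dev->tag_set);
		if (IS_ERR(dev->queue)) {
			blk_mq_free_tag_set(&dev->tag_set);
			return PTR_ERR(dev->queue);
		}
		return 0;
	}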
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3a27d31fcda6..44c7e567aa25 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5745,7 +5745,6 @@ static struct elevator_type iosched_bfq_mq = {
.exit_sched = bfq_exit_queue,
},
- .uses_mq = true,
.icq_size = sizeof(struct bfq_io_cq),
.icq_align = __alignof__(struct bfq_io_cq),
.elevator_attrs = bfq_attrs,
diff --git a/block/blk-core.c b/block/blk-core.c
index daaed4dfa719..18538a41a532 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -144,46 +144,6 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
-static void blk_clear_congested(struct request_list *rl, int sync)
-{
-#ifdef CONFIG_CGROUP_WRITEBACK
- clear_wb_congested(rl->blkg->wb_congested, sync);
-#else
- /*
- * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
- * flip its congestion state for events on other blkcgs.
- */
- if (rl == &rl->q->root_rl)
- clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
-#endif
-}
-
-static void blk_set_congested(struct request_list *rl, int sync)
-{
-#ifdef CONFIG_CGROUP_WRITEBACK
- set_wb_congested(rl->blkg->wb_congested, sync);
-#else
- /* see blk_clear_congested() */
- if (rl == &rl->q->root_rl)
- set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
-#endif
-}
-
-void blk_queue_congestion_threshold(struct request_queue *q)
-{
- int nr;
-
- nr = q->nr_requests - (q->nr_requests / 8) + 1;
- if (nr > q->nr_requests)
- nr = q->nr_requests;
- q->nr_congestion_on = nr;
-
- nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
- if (nr < 1)
- nr = 1;
- q->nr_congestion_off = nr;
-}
-
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
@@ -292,99 +252,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
}
EXPORT_SYMBOL(blk_dump_rq_flags);
-static void blk_delay_work(struct work_struct *work)
-{
- struct request_queue *q;
-
- q = container_of(work, struct request_queue, delay_work.work);
- spin_lock_irq(q->queue_lock);
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
-}
-
-/**
- * blk_delay_queue - restart queueing after defined interval
- * @q: The &struct request_queue in question
- * @msecs: Delay in msecs
- *
- * Description:
- * Sometimes queueing needs to be postponed for a little while, to allow
- * resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
- */
-void blk_delay_queue(struct request_queue *q, unsigned long msecs)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (likely(!blk_queue_dead(q)))
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
- msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_delay_queue);
-
-/**
- * blk_start_queue_async - asynchronously restart a previously stopped queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * blk_start_queue_async() will clear the stop flag on the queue, and
- * ensure that the request_fn for the queue is run from an async
- * context.
- **/
-void blk_start_queue_async(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- blk_run_queue_async(q);
-}
-EXPORT_SYMBOL(blk_start_queue_async);
-
-/**
- * blk_start_queue - restart a previously stopped queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * blk_start_queue() will clear the stop flag on the queue, and call
- * the request_fn for the queue if it was in a stopped state when
- * entered. Also see blk_stop_queue().
- **/
-void blk_start_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
-}
-EXPORT_SYMBOL(blk_start_queue);
-
-/**
- * blk_stop_queue - stop a queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * The Linux block layer assumes that a block driver will consume all
- * entries on the request queue when the request_fn strategy is called.
- * Often this will not happen, because of hardware limitations (queue
- * depth settings). If a device driver gets a 'queue full' response,
- * or if it simply chooses not to queue more I/O at one point, it can
- * call this function to prevent the request_fn from being called until
- * the driver has signalled it's ready to go again. This happens by calling
- * blk_start_queue() to restart queue operations.
- **/
-void blk_stop_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- cancel_delayed_work(&q->delay_work);
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
-}
-EXPORT_SYMBOL(blk_stop_queue);
-
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
@@ -415,8 +282,6 @@ void blk_sync_queue(struct request_queue *q)
cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
- } else {
- cancel_delayed_work_sync(&q->delay_work);
}
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -442,250 +307,12 @@ void blk_clear_pm_only(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);
-/**
- * __blk_run_queue_uncond - run a queue whether or not it has been stopped
- * @q: The queue to run
- *
- * Description:
- * Invoke request handling on a queue if there are any pending requests.
- * May be used to restart request handling after a request has completed.
- * This variant runs the queue whether or not the queue has been
- * stopped. Must be called with the queue lock held and interrupts
- * disabled. See also @blk_run_queue.
- */
-inline void __blk_run_queue_uncond(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (unlikely(blk_queue_dead(q)))
- return;
-
- /*
- * Some request_fn implementations, e.g. scsi_request_fn(), unlock
- * the queue lock internally. As a result multiple threads may be
- * running such a request function concurrently. Keep track of the
- * number of active request_fn invocations such that blk_drain_queue()
- * can wait until all these request_fn calls have finished.
- */
- q->request_fn_active++;
- q->request_fn(q);
- q->request_fn_active--;
-}
-EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
-
-/**
- * __blk_run_queue - run a single device queue
- * @q: The queue to run
- *
- * Description:
- * See @blk_run_queue.
- */
-void __blk_run_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (unlikely(blk_queue_stopped(q)))
- return;
-
- __blk_run_queue_uncond(q);
-}
-EXPORT_SYMBOL(__blk_run_queue);
-
-/**
- * blk_run_queue_async - run a single device queue in workqueue context
- * @q: The queue to run
- *
- * Description:
- * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
- *
- * Note:
- * Since it is not allowed to run q->delay_work after blk_cleanup_queue()
- * has canceled q->delay_work, callers must hold the queue lock to avoid
- * race conditions between blk_cleanup_queue() and blk_run_queue_async().
- */
-void blk_run_queue_async(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
- mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-}
-EXPORT_SYMBOL(blk_run_queue_async);
-
-/**
- * blk_run_queue - run a single device queue
- * @q: The queue to run
- *
- * Description:
- * Invoke request handling on this queue, if it has pending work to do.
- * May be used to restart queueing when a request has completed.
- */
-void blk_run_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_run_queue);
-
void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
-/**
- * __blk_drain_queue - drain requests from request_queue
- * @q: queue to drain
- * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
- *
- * Drain requests from @q. If @drain_all is set, all requests are drained.
- * If not, only ELVPRIV requests are drained. The caller is responsible
- * for ensuring that no new requests which need to be drained are queued.
- */
-static void __blk_drain_queue(struct request_queue *q, bool drain_all)
- __releases(q->queue_lock)
- __acquires(q->queue_lock)
-{
- int i;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- while (true) {
- bool drain = false;
-
- /*
- * The caller might be trying to drain @q before its
- * elevator is initialized.
- */
- if (q->elevator)
- elv_drain_elevator(q);
-
- blkcg_drain_queue(q);
-
- /*
- * This function might be called on a queue which failed
- * driver init after queue creation or is not yet fully
- * active yet. Some drivers (e.g. fd and loop) get unhappy
- * in such cases. Kick queue iff dispatch queue has
- * something on it and @q has request_fn set.
- */
- if (!list_empty(&q->queue_head) && q->request_fn)
- __blk_run_queue(q);
-
- drain |= q->nr_rqs_elvpriv;
- drain |= q->request_fn_active;
-
- /*
- * Unfortunately, requests are queued at and tracked from
- * multiple places and there's no single counter which can
- * be drained. Check all the queues and counters.
- */
- if (drain_all) {
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
- drain |= !list_empty(&q->queue_head);
- for (i = 0; i < 2; i++) {
- drain |= q->nr_rqs[i];
- drain |= q->in_flight[i];
- if (fq)
- drain |= !list_empty(&fq->flush_queue[i]);
- }
- }
-
- if (!drain)
- break;
-
- spin_unlock_irq(q->queue_lock);
-
- msleep(10);
-
- spin_lock_irq(q->queue_lock);
- }
-
- /*
- * With queue marked dead, any woken up waiter will fail the
- * allocation path, so the wakeup chaining is lost and we're
- * left with hung waiters. We need to wake up those waiters.
- */
- if (q->request_fn) {
- struct request_list *rl;
-
- blk_queue_for_each_rl(rl, q)
- for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
- wake_up_all(&rl->wait[i]);
- }
-}
-
-void blk_drain_queue(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- __blk_drain_queue(q, true);
- spin_unlock_irq(q->queue_lock);
-}
-
-/**
- * blk_queue_bypass_start - enter queue bypass mode
- * @q: queue of interest
- *
- * In bypass mode, only the dispatch FIFO queue of @q is used. This
- * function makes @q enter bypass mode and drains all requests which were
- * throttled or issued before. On return, it's guaranteed that no request
- * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
- * inside queue or RCU read lock.
- */
-void blk_queue_bypass_start(struct request_queue *q)
-{
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irq(q->queue_lock);
- q->bypass_depth++;
- queue_flag_set(QUEUE_FLAG_BYPASS, q);
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Queues start drained. Skip actual draining till init is
- * complete. This avoids lenghty delays during queue init which
- * can happen many times during boot.
- */
- if (blk_queue_init_done(q)) {
- spin_lock_irq(q->queue_lock);
- __blk_drain_queue(q, false);
- spin_unlock_irq(q->queue_lock);
-
- /* ensure blk_queue_bypass() is %true inside RCU read lock */
- synchronize_rcu();
- }
-}
-EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
-
-/**
- * blk_queue_bypass_end - leave queue bypass mode
- * @q: queue of interest
- *
- * Leave bypass mode and restore the normal queueing behavior.
- *
- * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
- * this function is called for both blk-sq and blk-mq queues.
- */
-void blk_queue_bypass_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- if (!--q->bypass_depth)
- queue_flag_clear(QUEUE_FLAG_BYPASS, q);
- WARN_ON_ONCE(q->bypass_depth < 0);
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
-
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
@@ -699,18 +326,6 @@ void blk_set_queue_dying(struct request_queue *q)
if (q->mq_ops)
blk_mq_wake_waiters(q);
- else {
- struct request_list *rl;
-
- spin_lock_irq(q->queue_lock);
- blk_queue_for_each_rl(rl, q) {
- if (rl->rq_pool) {
- wake_up_all(&rl->wait[BLK_RW_SYNC]);
- wake_up_all(&rl->wait[BLK_RW_ASYNC]);
- }
- }
- spin_unlock_irq(q->queue_lock);
- }
/* Make blk_queue_enter() reexamine the DYING flag. */
wake_up_all(&q->mq_freeze_wq);
@@ -822,6 +437,7 @@ void blk_cleanup_queue(struct request_queue *q)
if (q->mq_ops)
blk_mq_free_queue(q);
+
percpu_ref_exit(&q->q_usage_counter);
spin_lock_irq(lock);
@@ -1013,8 +629,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
- q->end_sector = 0;
- q->boundary_rq = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
@@ -1047,7 +661,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
#endif
- INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1100,105 +713,6 @@ fail_q:
}
EXPORT_SYMBOL(blk_alloc_queue_node);
-/**
- * blk_init_queue - prepare a request queue for use with a block device
- * @rfn: The function to be called to process requests that have been
- * placed on the queue.
- * @lock: Request queue spin lock
- *
- * Description:
- * If a block device wishes to use the standard request handling procedures,
- * which sorts requests and coalesces adjacent requests, then it must
- * call blk_init_queue(). The function @rfn will be called when there
- * are requests on the queue that need to be processed. If the device
- * supports plugging, then @rfn may not be called immediately when requests
- * are available on the queue, but may be called at some time later instead.
- * Plugged queues are generally unplugged when a buffer belonging to one
- * of the requests on the queue is needed, or due to memory pressure.
- *
- * @rfn is not required, or even expected, to remove all requests off the
- * queue, but only as many as it can handle at a time. If it does leave
- * requests on the queue, it is responsible for arranging that the requests
- * get dealt with eventually.
- *
- * The queue spin lock must be held while manipulating the requests on the
- * request queue; this lock will be taken also from interrupt context, so irq
- * disabling is needed for it.
- *
- * Function returns a pointer to the initialized request queue, or %NULL if
- * it didn't succeed.
- *
- * Note:
- * blk_init_queue() must be paired with a blk_cleanup_queue() call
- * when the block device is deactivated (such as at module unload).
- **/
-
-struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
-{
- return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(blk_init_queue);
-
-struct request_queue *
-blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
-{
- struct request_queue *q;
-
- q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
- if (!q)
- return NULL;
-
- q->request_fn = rfn;
- if (blk_init_allocated_queue(q) < 0) {
- blk_cleanup_queue(q);
- return NULL;
- }
-
- return q;
-}
-EXPORT_SYMBOL(blk_init_queue_node);
-
-static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
-
-
-int blk_init_allocated_queue(struct request_queue *q)
-{
- WARN_ON_ONCE(q->mq_ops);
-
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
- if (!q->fq)
- return -ENOMEM;
-
- if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
- goto out_free_flush_queue;
-
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
- goto out_exit_flush_rq;
-
- INIT_WORK(&q->timeout_work, blk_timeout_work);
- q->queue_flags |= QUEUE_FLAG_DEFAULT;
-
- /*
- * This also sets hw/phys segments, boundary and size
- */
- blk_queue_make_request(q, blk_queue_bio);
-
- q->sg_reserved_size = INT_MAX;
-
- if (elevator_init(q))
- goto out_exit_flush_rq;
- return 0;
-
-out_exit_flush_rq:
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, q->fq->flush_rq);
-out_free_flush_queue:
- blk_free_flush_queue(q->fq);
- q->fq = NULL;
- return -ENOMEM;
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
bool blk_get_queue(struct request_queue *q)
{
if (likely(!blk_queue_dying(q))) {
@@ -1210,406 +724,6 @@ bool blk_get_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(struct request_list *rl, struct request *rq)
-{
- if (rq->rq_flags & RQF_ELVPRIV) {
- elv_put_request(rl->q, rq);
- if (rq->elv.icq)
- put_io_context(rq->elv.icq->ioc);
- }
-
- mempool_free(rq, rl->rq_pool);
-}
-
-/*
- * ioc_batching returns true if the ioc is a valid batching request and
- * should be given priority access to a request.
- */
-static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
-{
- if (!ioc)
- return 0;
-
- /*
- * Make sure the process is able to allocate at least 1 request
- * even if the batch times out, otherwise we could theoretically
- * lose wakeups.
- */
- return ioc->nr_batch_requests == q->nr_batching ||
- (ioc->nr_batch_requests > 0
- && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
-}
-
-/*
- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
- * will cause the process to be a "batcher" on all queues in the system. This
- * is the behaviour we want though - once it gets a wakeup it should be given
- * a nice run.
- */
-static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
-{
- if (!ioc || ioc_batching(q, ioc))
- return;
-
- ioc->nr_batch_requests = q->nr_batching;
- ioc->last_waited = jiffies;
-}
-
-static void __freed_request(struct request_list *rl, int sync)
-{
- struct request_queue *q = rl->q;
-
- if (rl->count[sync] < queue_congestion_off_threshold(q))
- blk_clear_congested(rl, sync);
-
- if (rl->count[sync] + 1 <= q->nr_requests) {
- if (waitqueue_active(&rl->wait[sync]))
- wake_up(&rl->wait[sync]);
-
- blk_clear_rl_full(rl, sync);
- }
-}
-
-/*
- * A request has just been released. Account for it, update the full and
- * congestion status, wake up any waiters. Called under q->queue_lock.
- */
-static void freed_request(struct request_list *rl, bool sync,
- req_flags_t rq_flags)
-{
- struct request_queue *q = rl->q;
-
- q->nr_rqs[sync]--;
- rl->count[sync]--;
- if (rq_flags & RQF_ELVPRIV)
- q->nr_rqs_elvpriv--;
-
- __freed_request(rl, sync);
-
- if (unlikely(rl->starved[sync ^ 1]))
- __freed_request(rl, sync ^ 1);
-}
-
-int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
-{
- struct request_list *rl;
- int on_thresh, off_thresh;
-
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irq(q->queue_lock);
- q->nr_requests = nr;
- blk_queue_congestion_threshold(q);
- on_thresh = queue_congestion_on_threshold(q);
- off_thresh = queue_congestion_off_threshold(q);
-
- blk_queue_for_each_rl(rl, q) {
- if (rl->count[BLK_RW_SYNC] >= on_thresh)
- blk_set_congested(rl, BLK_RW_SYNC);
- else if (rl->count[BLK_RW_SYNC] < off_thresh)
- blk_clear_congested(rl, BLK_RW_SYNC);
-
- if (rl->count[BLK_RW_ASYNC] >= on_thresh)
- blk_set_congested(rl, BLK_RW_ASYNC);
- else if (rl->count[BLK_RW_ASYNC] < off_thresh)
- blk_clear_congested(rl, BLK_RW_ASYNC);
-
- if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
- blk_set_rl_full(rl, BLK_RW_SYNC);
- } else {
- blk_clear_rl_full(rl, BLK_RW_SYNC);
- wake_up(&rl->wait[BLK_RW_SYNC]);
- }
-
- if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
- blk_set_rl_full(rl, BLK_RW_ASYNC);
- } else {
- blk_clear_rl_full(rl, BLK_RW_ASYNC);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
- }
- }
-
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-/**
- * __get_request - get a free request
- * @rl: request list to allocate from
- * @op: operation and flags
- * @bio: bio to allocate request for (can be %NULL)
- * @flags: BLQ_MQ_REQ_* flags
- * @gfp_mask: allocator flags
- *
- * Get a free request from @q. This function may fail under memory
- * pressure or if @q is dead.
- *
- * Must be called with @q->queue_lock held and,
- * Returns ERR_PTR on failure, with @q->queue_lock held.
- * Returns request pointer on success, with @q->queue_lock *not held*.
- */
-static struct request *__get_request(struct request_list *rl, unsigned int op,
- struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
-{
- struct request_queue *q = rl->q;
- struct request *rq;
- struct elevator_type *et = q->elevator->type;
- struct io_context *ioc = rq_ioc(bio);
- struct io_cq *icq = NULL;
- const bool is_sync = op_is_sync(op);
- int may_queue;
- req_flags_t rq_flags = RQF_ALLOCED;
-
- lockdep_assert_held(q->queue_lock);
-
- if (unlikely(blk_queue_dying(q)))
- return ERR_PTR(-ENODEV);
-
- may_queue = elv_may_queue(q, op);
- if (may_queue == ELV_MQUEUE_NO)
- goto rq_starved;
-
- if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
- if (rl->count[is_sync]+1 >= q->nr_requests) {
- /*
- * The queue will fill after this allocation, so set
- * it as full, and mark this process as "batching".
- * This process will be allowed to complete a batch of
- * requests, others will be blocked.
- */
- if (!blk_rl_full(rl, is_sync)) {
- ioc_set_batching(q, ioc);
- blk_set_rl_full(rl, is_sync);
- } else {
- if (may_queue != ELV_MQUEUE_MUST
- && !ioc_batching(q, ioc)) {
- /*
- * The queue is full and the allocating
- * process is not a "batcher", and not
- * exempted by the IO scheduler
- */
- return ERR_PTR(-ENOMEM);
- }
- }
- }
- blk_set_congested(rl, is_sync);
- }
-
- /*
- * Only allow batching queuers to allocate up to 50% over the defined
- * limit of requests, otherwise we could have thousands of requests
- * allocated with any setting of ->nr_requests
- */
- if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- return ERR_PTR(-ENOMEM);
-
- q->nr_rqs[is_sync]++;
- rl->count[is_sync]++;
- rl->starved[is_sync] = 0;
-
- /*
- * Decide whether the new request will be managed by elevator. If
- * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
- * prevent the current elevator from being destroyed until the new
- * request is freed. This guarantees icq's won't be destroyed and
- * makes creating new ones safe.
- *
- * Flush requests do not use the elevator so skip initialization.
- * This allows a request to share the flush and elevator data.
- *
- * Also, lookup icq while holding queue_lock. If it doesn't exist,
- * it will be created after releasing queue_lock.
- */
- if (!op_is_flush(op) && !blk_queue_bypass(q)) {
- rq_flags |= RQF_ELVPRIV;
- q->nr_rqs_elvpriv++;
- if (et->icq_cache && ioc)
- icq = ioc_lookup_icq(ioc, q);
- }
-
- if (blk_queue_io_stat(q))
- rq_flags |= RQF_IO_STAT;
- spin_unlock_irq(q->queue_lock);
-
- /* allocate and init request */
- rq = mempool_alloc(rl->rq_pool, gfp_mask);
- if (!rq)
- goto fail_alloc;
-
- blk_rq_init(q, rq);
- blk_rq_set_rl(rq, rl);
- rq->cmd_flags = op;
- rq->rq_flags = rq_flags;
- if (flags & BLK_MQ_REQ_PREEMPT)
- rq->rq_flags |= RQF_PREEMPT;
-
- /* init elvpriv */
- if (rq_flags & RQF_ELVPRIV) {
- if (unlikely(et->icq_cache && !icq)) {
- if (ioc)
- icq = ioc_create_icq(ioc, q, gfp_mask);
- if (!icq)
- goto fail_elvpriv;
- }
-
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
- goto fail_elvpriv;
-
- /* @rq->elv.icq holds io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-out:
- /*
- * ioc may be NULL here, and ioc_batching will be false. That's
- * OK, if the queue is under the request limit then requests need
- * not count toward the nr_batch_requests limit. There will always
- * be some limit enforced by BLK_BATCH_TIME.
- */
- if (ioc_batching(q, ioc))
- ioc->nr_batch_requests--;
-
- trace_block_getrq(q, bio, op);
- return rq;
-
-fail_elvpriv:
- /*
- * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
- * and may fail indefinitely under memory pressure and thus
- * shouldn't stall IO. Treat this request as !elvpriv. This will
- * disturb iosched and blkcg but weird is bettern than dead.
- */
- printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
- __func__, dev_name(q->backing_dev_info->dev));
-
- rq->rq_flags &= ~RQF_ELVPRIV;
- rq->elv.icq = NULL;
-
- spin_lock_irq(q->queue_lock);
- q->nr_rqs_elvpriv--;
- spin_unlock_irq(q->queue_lock);
- goto out;
-
-fail_alloc:
- /*
- * Allocation failed presumably due to memory. Undo anything we
- * might have messed up.
- *
- * Allocating task should really be put onto the front of the wait
- * queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(rl, is_sync, rq_flags);
-
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved so that
- * freeing of a request in the other direction will notice
- * us. another possible fix would be to split the rq mempool into
- * READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
- return ERR_PTR(-ENOMEM);
-}
-
-/**
- * get_request - get a free request
- * @q: request_queue to allocate request from
- * @op: operation and flags
- * @bio: bio to allocate request for (can be %NULL)
- * @flags: BLK_MQ_REQ_* flags.
- * @gfp: allocator flags
- *
- * Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is set in @flags,
- * this function keeps retrying under memory pressure and fails iff @q is dead.
- *
- * Must be called with @q->queue_lock held and,
- * Returns ERR_PTR on failure, with @q->queue_lock held.
- * Returns request pointer on success, with @q->queue_lock *not held*.
- */
-static struct request *get_request(struct request_queue *q, unsigned int op,
- struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
-{
- const bool is_sync = op_is_sync(op);
- DEFINE_WAIT(wait);
- struct request_list *rl;
- struct request *rq;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- rl = blk_get_rl(q, bio); /* transferred to @rq on success */
-retry:
- rq = __get_request(rl, op, bio, flags, gfp);
- if (!IS_ERR(rq))
- return rq;
-
- if (op & REQ_NOWAIT) {
- blk_put_rl(rl);
- return ERR_PTR(-EAGAIN);
- }
-
- if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
- blk_put_rl(rl);
- return rq;
- }
-
- /* wait on @rl and retry */
- prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
- TASK_UNINTERRUPTIBLE);
-
- trace_block_sleeprq(q, bio, op);
-
- spin_unlock_irq(q->queue_lock);
- io_schedule();
-
- /*
- * After sleeping, we become a "batching" process and will be able
- * to allocate at least one request, and up to a big batch of them
- * for a small period time. See ioc_batching, ioc_set_batching
- */
- ioc_set_batching(q, current->io_context);
-
- spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[is_sync], &wait);
-
- goto retry;
-}
-
-/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
-static struct request *blk_old_get_request(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags)
-{
- struct request *rq;
- gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
- int ret = 0;
-
- WARN_ON_ONCE(q->mq_ops);
-
- /* create ioc upfront */
- create_io_context(gfp_mask, q->node);
-
- ret = blk_queue_enter(q, flags);
- if (ret)
- return ERR_PTR(ret);
- spin_lock_irq(q->queue_lock);
- rq = get_request(q, op, NULL, flags, gfp_mask);
- if (IS_ERR(rq)) {
- spin_unlock_irq(q->queue_lock);
- blk_queue_exit(q);
- return rq;
- }
-
- /* q->queue_lock is unlocked at this point */
- rq->__data_len = 0;
- rq->__sector = (sector_t) -1;
- rq->bio = rq->biotail = NULL;
- return rq;
-}
-
/**
* blk_get_request - allocate a request
* @q: request queue to allocate a request for
@@ -1624,53 +738,14 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
WARN_ON_ONCE(op & REQ_NOWAIT);
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
- if (q->mq_ops) {
- req = blk_mq_alloc_request(q, op, flags);
- if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
- q->mq_ops->initialize_rq_fn(req);
- } else {
- req = blk_old_get_request(q, op, flags);
- if (!IS_ERR(req) && q->initialize_rq_fn)
- q->initialize_rq_fn(req);
- }
+ req = blk_mq_alloc_request(q, op, flags);
+ if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+ q->mq_ops->initialize_rq_fn(req);
return req;
}
EXPORT_SYMBOL(blk_get_request);
-/**
- * blk_requeue_request - put a request back on queue
- * @q: request queue where request should be inserted
- * @rq: request to be inserted
- *
- * Description:
- * Drivers often keep queueing requests until the hardware cannot accept
- * more, when that condition happens we need to put the request back
- * on the queue. Must be called with queue lock held.
- */
-void blk_requeue_request(struct request_queue *q, struct request *rq)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- blk_delete_timer(rq);
- blk_clear_rq_complete(rq);
- trace_block_rq_requeue(q, rq);
- rq_qos_requeue(q, rq);
-
- BUG_ON(blk_queued_rq(rq));
-
- elv_requeue_request(q, rq);
-}
-EXPORT_SYMBOL(blk_requeue_request);
-
-static void add_acct_request(struct request_queue *q, struct request *rq,
- int where)
-{
- blk_account_io_start(rq, true);
- __elv_add_request(q, rq, where);
-}
-
static void part_round_stats_single(struct request_queue *q, int cpu,
struct hd_struct *part, unsigned long now,
unsigned int inflight)
@@ -1730,61 +805,16 @@ EXPORT_SYMBOL_GPL(part_round_stats);
void __blk_put_request(struct request_queue *q, struct request *req)
{
- req_flags_t rq_flags = req->rq_flags;
-
if (unlikely(!q))
return;
- if (q->mq_ops) {
- blk_mq_free_request(req);
- return;