From d15bb3a6467e102e60d954aadda5fb19ce6fd8ec Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Fri, 11 Nov 2016 17:05:27 -0800
Subject: dm rq: fix a race condition in rq_completed()

The queue lock must be held when calling blk_run_queue_async() to avoid
triggering a race between blk_run_queue_async() and blk_cleanup_queue().

Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-rq.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..31a89c8832c0 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	struct request_queue *q = md->queue;
+	unsigned long flags;
+
 	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
@@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (!md->queue->mq_ops && run_queue)
-		blk_run_queue_async(md->queue);
+	if (!q->mq_ops && run_queue) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_run_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
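
For context on why the lock matters here: in the legacy (non-mq) block layer,
blk_cleanup_queue() marks the queue dying and drains it while manipulating
queue state under q->queue_lock, so a completion path that calls
blk_run_queue_async() without holding that lock can interleave with the
teardown. The standalone userspace sketch below is only a way to picture that
interaction under stated assumptions; it is not kernel code. struct
model_queue, run_queue_async() and cleanup_queue() are hypothetical stand-ins
for the request queue, blk_run_queue_async() and blk_cleanup_queue(), and a
pthread mutex stands in for q->queue_lock.

/*
 * Userspace model of the race (illustrative assumption, not kernel code):
 * one thread tears a queue down while another tries to kick it.  Taking the
 * same lock on both paths, as the patch does with q->queue_lock, keeps the
 * run path from touching a queue that is mid-teardown.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct model_queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	bool dead;		/* set by the teardown path */
	int *resources;		/* freed by the teardown path */
};

/* Completion path: analogous to rq_completed() kicking the queue. */
static void run_queue_async(struct model_queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (!q->dead)			/* teardown cannot interleave here */
		q->resources[0]++;	/* "restart" work on the queue */
	pthread_mutex_unlock(&q->lock);
}

/* Teardown path: analogous to blk_cleanup_queue(). */
static void cleanup_queue(struct model_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->dead = true;
	free(q->resources);
	q->resources = NULL;
	pthread_mutex_unlock(&q->lock);
}

static void *completion_thread(void *arg)
{
	run_queue_async(arg);
	return NULL;
}

int main(void)
{
	struct model_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dead = false,
		.resources = calloc(1, sizeof(int)),
	};
	pthread_t t;

	pthread_create(&t, NULL, completion_thread, &q);
	cleanup_queue(&q);	/* runs concurrently with the completion thread */
	pthread_join(t, NULL);

	printf("queue torn down without touching freed memory\n");
	return 0;
}

Build with: cc -pthread race_model.c. With the mutex held on both sides, the
run path either finishes before teardown begins or observes dead == true and
backs off; that is the same serialization the patch restores by wrapping
blk_run_queue_async() in spin_lock_irqsave()/spin_unlock_irqrestore() on
q->queue_lock.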