From: Bart Van Assche Date: Wed, 31 Aug 2016 22:17:49 +0000 (-0700) Subject: dm: mark request_queue dead before destroying the DM device X-Git-Tag: firefly_0821_release~176^2~4^2~22^2~90 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=90be7f1538fb0ab22582f018e42115f18315eb8d;p=firefly-linux-kernel-4.4.55.git dm: mark request_queue dead before destroying the DM device commit 3b785fbcf81c3533772c52b717f77293099498d3 upstream. This prevents new requests from being queued while __dm_destroy() is in progress. Signed-off-by: Bart Van Assche Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a42729ebf272..0efc3d60e6b7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2869,6 +2869,7 @@ EXPORT_SYMBOL_GPL(dm_device_name); static void __dm_destroy(struct mapped_device *md, bool wait) { + struct request_queue *q = dm_get_md_queue(md); struct dm_table *map; int srcu_idx; @@ -2879,6 +2880,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); + spin_lock_irq(q->queue_lock); + queue_flag_set(QUEUE_FLAG_DYING, q); + spin_unlock_irq(q->queue_lock); + if (dm_request_based(md) && md->kworker_task) flush_kthread_worker(&md->kworker);