dm cache: fix device destroy hang due to improper prealloc_used accounting
author	Mike Snitzer <snitzer@redhat.com>
	Wed, 29 Jul 2015 17:48:23 +0000 (13:48 -0400)
committer	Mike Snitzer <snitzer@redhat.com>
	Wed, 29 Jul 2015 18:32:09 +0000 (14:32 -0400)
Commit 665022d72f9 ("dm cache: avoid calls to prealloc_free_structs() if
possible") introduced a regression that caused the removal of a DM cache
device to hang in cache_postsuspend()'s call to wait_for_migrations()
with the following stack trace:

  [<ffffffff81651457>] schedule+0x37/0x80
  [<ffffffffa041e21b>] cache_postsuspend+0xbb/0x470 [dm_cache]
  [<ffffffff810ba970>] ? prepare_to_wait_event+0xf0/0xf0
  [<ffffffffa0006f77>] dm_table_postsuspend_targets+0x47/0x60 [dm_mod]
  [<ffffffffa0001eb5>] __dm_destroy+0x215/0x250 [dm_mod]
  [<ffffffffa0004113>] dm_destroy+0x13/0x20 [dm_mod]
  [<ffffffffa00098cd>] dev_remove+0x10d/0x170 [dm_mod]
  [<ffffffffa00097c0>] ? dev_suspend+0x240/0x240 [dm_mod]
  [<ffffffffa0009f85>] ctl_ioctl+0x255/0x4d0 [dm_mod]
  [<ffffffff8127ac00>] ? SYSC_semtimedop+0x280/0xe10
  [<ffffffffa000a213>] dm_ctl_ioctl+0x13/0x20 [dm_mod]
  [<ffffffff811fd432>] do_vfs_ioctl+0x2d2/0x4b0
  [<ffffffff81117d5f>] ? __audit_syscall_entry+0xaf/0x100
  [<ffffffff81022636>] ? do_audit_syscall_entry+0x66/0x70
  [<ffffffff811fd689>] SyS_ioctl+0x79/0x90
  [<ffffffff81023e58>] ? syscall_trace_leave+0xb8/0x110
  [<ffffffff81654f6e>] entry_SYSCALL_64_fastpath+0x12/0x71

Fix this by setting prealloc_used to true immediately _before_ the call
to prealloc_data_structs(), as opposed to after it.  This is needed
because it is possible to break out of the control loop after the call
to prealloc_data_structs() but before prealloc_used is set to true;
any structures already preallocated are then never released by
prealloc_free_structs(), and wait_for_migrations() waits forever for
them to be returned.
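
To make the ordering concrete, here is a minimal sketch of the loop
shape shared by all three affected functions (illustrative only:
have_work() and do_work() stand in for the real per-loop logic, and the
error-path bookkeeping is elided).  prealloc_data_structs() can fail
partway through, leaving structures it already allocated held in the
prealloc; if the loop then breaks with prealloc_used still false, the
cleanup at the bottom is skipped:

  static void process_loop(struct cache *cache)
  {
          struct prealloc structs;
          bool prealloc_used = false;

          memset(&structs, 0, sizeof(structs));

          while (have_work(cache)) {
                  /* Fixed ordering: mark the prealloc as (potentially)
                   * used before the fallible call, so the cleanup below
                   * runs even if we break out early. */
                  prealloc_used = true;
                  if (prealloc_data_structs(cache, &structs))
                          break;  /* may have partially allocated */

                  do_work(cache, &structs);
                  /* The buggy ordering set prealloc_used = true here,
                   * which the break above would skip. */
          }

          if (prealloc_used)
                  prealloc_free_structs(cache, &structs);
  }

With the flag raised up front, every early exit that follows a
(possibly partial) preallocation still reaches prealloc_free_structs(),
so nothing is leaked and cache_postsuspend() can complete.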

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-cache-target.c

index 64e96a2bed589e48d831dabbcdcbe42aa8334323..1fe93cfea7d309a659d79fe2b953b5f2dbe7b466 100644
@@ -1967,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        bio_list_merge(&cache->deferred_bios, &bios);
@@ -1982,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
                        process_discard_bio(cache, &structs, bio);
                else
                        process_bio(cache, &structs, bio);
-               prealloc_used = true;
        }
 
        if (prealloc_used)
@@ -2011,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        list_splice(&cells, &cache->deferred_cells);
@@ -2019,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
                }
 
                process_cell(cache, &structs, cell);
-               prealloc_used = true;
        }
 
        if (prealloc_used)
@@ -2081,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
                if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
                        break; /* no work to do */
 
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs) ||
                    get_cell(cache, oblock, &structs, &old_ocell)) {
                        policy_set_dirty(cache->policy, oblock);
@@ -2088,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
                }
 
                writeback(cache, &structs, oblock, cblock, old_ocell);
-               prealloc_used = true;
        }
 
        if (prealloc_used)