DM RAID: Break-up untidy function
author	Jonathan Brassow <jbrassow@redhat.com>
Wed, 8 May 2013 22:57:13 +0000 (17:57 -0500)
committer	NeilBrown <neilb@suse.de>
Thu, 13 Jun 2013 22:10:25 +0000 (08:10 +1000)
DM RAID:  Break-up untidy function

Clean up excessive indentation by moving some code from raid_resume()
into its own function.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
drivers/md/dm-raid.c

index facaf9142d5a444f3c546a7e3c4ae108745b7f14..59d15ec0ba81e62c6c799413f4ce0644128e0137 100644 (file)
@@ -1572,15 +1572,51 @@ static void raid_postsuspend(struct dm_target *ti)
        mddev_suspend(&rs->md);
 }
 
-static void raid_resume(struct dm_target *ti)
+static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 {
        int i;
        uint64_t failed_devices, cleared_failed_devices = 0;
        unsigned long flags;
        struct dm_raid_superblock *sb;
-       struct raid_set *rs = ti->private;
        struct md_rdev *r;
 
+       for (i = 0; i < rs->md.raid_disks; i++) {
+               r = &rs->dev[i].rdev;
+               if (test_bit(Faulty, &r->flags) && r->sb_page &&
+                   sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+                       DMINFO("Faulty %s device #%d has readable super block."
+                              "  Attempting to revive it.",
+                              rs->raid_type->name, i);
+                       r->raid_disk = i;
+                       r->saved_raid_disk = i;
+                       flags = r->flags;
+                       clear_bit(Faulty, &r->flags);
+                       clear_bit(WriteErrorSeen, &r->flags);
+                       clear_bit(In_sync, &r->flags);
+                       if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+                               r->raid_disk = -1;
+                               r->saved_raid_disk = -1;
+                               r->flags = flags;
+                       } else {
+                               r->recovery_offset = 0;
+                               cleared_failed_devices |= 1 << i;
+                       }
+               }
+       }
+       if (cleared_failed_devices) {
+               rdev_for_each(r, &rs->md) {
+                       sb = page_address(r->sb_page);
+                       failed_devices = le64_to_cpu(sb->failed_devices);
+                       failed_devices &= ~cleared_failed_devices;
+                       sb->failed_devices = cpu_to_le64(failed_devices);
+               }
+       }
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+       struct raid_set *rs = ti->private;
+
        set_bit(MD_CHANGE_DEVS, &rs->md.flags);
        if (!rs->bitmap_loaded) {
                bitmap_load(&rs->md);
@@ -1591,37 +1627,7 @@ static void raid_resume(struct dm_target *ti)
                 * Take this opportunity to check whether any failed
                 * devices are reachable again.
                 */
-               for (i = 0; i < rs->md.raid_disks; i++) {
-                       r = &rs->dev[i].rdev;
-                       if (test_bit(Faulty, &r->flags) && r->sb_page &&
-                           sync_page_io(r, 0, r->sb_size,
-                                        r->sb_page, READ, 1)) {
-                               DMINFO("Faulty device #%d has readable super"
-                                      "block.  Attempting to revive it.", i);
-                               r->raid_disk = i;
-                               r->saved_raid_disk = i;
-                               flags = r->flags;
-                               clear_bit(Faulty, &r->flags);
-                               clear_bit(WriteErrorSeen, &r->flags);
-                               clear_bit(In_sync, &r->flags);
-                               if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
-                                       r->raid_disk = -1;
-                                       r->saved_raid_disk = -1;
-                                       r->flags = flags;
-                               } else {
-                                       r->recovery_offset = 0;
-                                       cleared_failed_devices |= 1 << i;
-                               }
-                       }
-               }
-               if (cleared_failed_devices) {
-                       rdev_for_each(r, &rs->md) {
-                               sb = page_address(r->sb_page);
-                               failed_devices = le64_to_cpu(sb->failed_devices);
-                               failed_devices &= ~cleared_failed_devices;
-                               sb->failed_devices = cpu_to_le64(failed_devices);
-                       }
-               }
+               attempt_restore_of_faulty_devices(rs);
        }
 
        clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
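
The extracted helper keeps a bitmask of the devices it manages to revive and then clears those bits from each superblock's failed_devices field. Below is a minimal, standalone user-space sketch of that bitmask bookkeeping only; the function and variable names mirror the patch for readability, but this is illustrative code, not kernel code or part of the commit.

/*
 * Sketch of the failed_devices bookkeeping done by
 * attempt_restore_of_faulty_devices(): each revived device sets its
 * bit in cleared_failed_devices, and that mask is then cleared from
 * the superblock's failed_devices field.  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t failed_devices = 0x16ULL;	/* bits 1, 2 and 4: devices marked failed */
	uint64_t cleared_failed_devices = 0;

	/* Suppose devices 1 and 4 came back with readable superblocks. */
	cleared_failed_devices |= 1ULL << 1;
	cleared_failed_devices |= 1ULL << 4;

	/* The same update the helper applies to each superblock. */
	failed_devices &= ~cleared_failed_devices;

	/* Prints 0x0000000000000004: only device 2 is still failed. */
	printf("failed_devices after revival: 0x%016llx\n",
	       (unsigned long long)failed_devices);
	return 0;
}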