diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9233c71138f18a62a4c175d7acf9c3b8fd03a3d5..709755fb6d7b62823d2291c99397ca0111adcbaf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -247,7 +247,6 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 {
        const int rw = bio_data_dir(bio);
        struct mddev *mddev = q->queuedata;
-       int cpu;
        unsigned int sectors;
 
        if (mddev == NULL || mddev->pers == NULL
@@ -284,10 +283,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
        sectors = bio_sectors(bio);
        mddev->pers->make_request(mddev, bio);
 
-       cpu = part_stat_lock();
-       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
-       part_stat_unlock();
+       generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
 
        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
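
The open-coded part_stat sequence above (and the now-unused `cpu` local removed in the first hunk) is replaced by the block layer's generic_start_io_acct() helper, added around this release. Roughly, the helper performs the same per-cpu accounting plus the round-stats and in-flight bookkeeping; the sketch below is paraphrased from memory rather than quoted from block/bio.c, so details may differ:

	void generic_start_io_acct(int rw, unsigned long sectors,
				   struct hd_struct *part)
	{
		int cpu = part_stat_lock();

		part_round_stats(cpu, part);		/* time-in-queue bookkeeping */
		part_stat_inc(cpu, part, ios[rw]);	/* one more read or write */
		part_stat_add(cpu, part, sectors[rw], sectors);
		part_inc_in_flight(part, rw);		/* undone by generic_end_io_acct() */

		part_stat_unlock();
	}
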
@@ -2695,7 +2691,8 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
        if (kstrtoull(buf, 10, &new_offset) < 0)
                return -EINVAL;
 
-       if (mddev->sync_thread)
+       if (mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
                return -EBUSY;
        if (new_offset == rdev->data_offset)
                /* reset is always permitted */
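
This hunk and the similar ones below close one race: MD_RECOVERY_RUNNING is set in md_check_recovery() before the sync thread is actually registered from a md_misc_wq work item, so ->sync_thread can still be NULL while a resync/reshape is already committed to start. Every -EBUSY guard that used to test only ->sync_thread therefore now also tests the RUNNING bit. A condensed illustration of the window, with the sequence simplified from memory rather than copied verbatim from md.c:

	/* md_check_recovery(), under the mddev lock (schematic): */
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	queue_work(md_misc_wq, &mddev->del_work);	/* runs md_start_sync() later */

	/* A racing sysfs write, e.g. new_offset_store(), before this patch: */
	if (mddev->sync_thread)		/* still NULL here, so the guard passes */
		return -EBUSY;
	/* ...geometry changes proceed while a resync is about to begin... */

	/* Eventually, md_start_sync() on the workqueue: */
	mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync");
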
@@ -3272,6 +3269,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
         */
 
        if (mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            mddev->reshape_position != MaxSector ||
            mddev->sysfs_active)
                return -EBUSY;
@@ -4026,6 +4024,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
        if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
+               flush_workqueue(md_misc_wq);
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_reap_sync_thread(mddev);
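
md_start_sync() runs from md_misc_wq, so the "idle"/"frozen" path here, and __md_stop_writes() in the next hunk, flush that workqueue before looking at ->sync_thread: any queued start-of-sync work has then completed, and the thread pointer, if set, can be interrupted and reaped. The ordering as it appears in __md_stop_writes() below, restated with comments (a sketch of the existing logic, not new md code):

	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);	/* keep md_check_recovery() from
							 * starting another sync */
	flush_workqueue(md_misc_wq);			/* a queued md_start_sync() has
							 * now run to completion */
	if (mddev->sync_thread) {			/* reliable after the flush */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_reap_sync_thread(mddev);
	}
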
@@ -5044,6 +5043,7 @@ static void md_clean(struct mddev *mddev)
 static void __md_stop_writes(struct mddev *mddev)
 {
        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+       flush_workqueue(md_misc_wq);
        if (mddev->sync_thread) {
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                md_reap_sync_thread(mddev);
@@ -5104,19 +5104,22 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
-       if (mddev->sync_thread) {
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       if (mddev->sync_thread)
                /* Thread might be blocked waiting for metadata update
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
-       }
+
        mddev_unlock(mddev);
-       wait_event(resync_wait, mddev->sync_thread == NULL);
+       wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+                                         &mddev->recovery));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
        if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
            mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
                printk("md: %s still in use.\n",mdname(mddev));
                if (did_freeze) {
@@ -5162,20 +5165,24 @@ static int do_md_stop(struct mddev *mddev, int mode,
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
-       if (mddev->sync_thread) {
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       if (mddev->sync_thread)
                /* Thread might be blocked waiting for metadata update
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
-       }
+
        mddev_unlock(mddev);
-       wait_event(resync_wait, mddev->sync_thread == NULL);
+       wait_event(resync_wait, (mddev->sync_thread == NULL &&
+                                !test_bit(MD_RECOVERY_RUNNING,
+                                          &mddev->recovery)));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
        if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
            mddev->sysfs_active ||
            mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
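
md_set_readonly() and do_md_stop() used to request interruption only when ->sync_thread was non-NULL and then wait for the pointer to become NULL. They now set MD_RECOVERY_INTR whenever the RUNNING bit is set and wait for that bit to clear (do_md_stop() waits for both), which also covers the window before md_start_sync() has registered the thread. The waiting relies on wait_event()'s re-check loop, roughly the following (a generic sketch of the macro's behaviour, not literal md code):

	DEFINE_WAIT(wq_entry);

	for (;;) {
		prepare_to_wait(&resync_wait, &wq_entry, TASK_UNINTERRUPTIBLE);
		if (mddev->sync_thread == NULL &&
		    !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			break;			/* condition true: stop waiting */
		schedule();			/* sleep until wake_up(&resync_wait) */
	}
	finish_wait(&resync_wait, &wq_entry);

Because the condition is re-evaluated after every wake-up, waiters only make progress if the wake-up arrives after MD_RECOVERY_RUNNING has actually been cleared, which is what the relocated wake_up(&resync_wait) calls at the end of this patch guarantee.
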
@@ -5950,7 +5957,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
         * of each device.  If num_sectors is zero, we find the largest size
         * that fits.
         */
-       if (mddev->sync_thread)
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+           mddev->sync_thread)
                return -EBUSY;
        if (mddev->ro)
                return -EROFS;
@@ -5981,7 +5989,9 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
        if (raid_disks <= 0 ||
            (mddev->max_disks && raid_disks >= mddev->max_disks))
                return -EINVAL;
-       if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+       if (mddev->sync_thread ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+           mddev->reshape_position != MaxSector)
                return -EBUSY;
 
        rdev_for_each(rdev, mddev) {
@@ -6969,7 +6979,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
        int mask;
 
        if (md_unloading)
-               return POLLIN|POLLRDNORM|POLLERR|POLLPRI;;
+               return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
        poll_wait(filp, &md_event_waiters, wait);
 
        /* always allow read */
@@ -7593,6 +7603,7 @@ static void md_start_sync(struct work_struct *ws)
                clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+               wake_up(&resync_wait);
                if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                       &mddev->recovery))
                        if (mddev->sysfs_action)
@@ -7761,6 +7772,7 @@ void md_check_recovery(struct mddev *mddev)
        not_running:
                if (!mddev->sync_thread) {
                        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+                       wake_up(&resync_wait);
                        if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                               &mddev->recovery))
                                if (mddev->sysfs_action)
@@ -7779,7 +7791,6 @@ void md_reap_sync_thread(struct mddev *mddev)
 
        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
-       wake_up(&resync_wait);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
@@ -7807,6 +7818,7 @@ void md_reap_sync_thread(struct mddev *mddev)
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       wake_up(&resync_wait);
        /* flag recovery needed just to double check */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
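
Finally, the wake-ups move to the points where MD_RECOVERY_RUNNING is dropped. The old wake_up(&resync_wait) sat at the top of md_reap_sync_thread(), which was fine when waiters only checked ->sync_thread == NULL (just cleared by md_unregister_thread()), but not for the new !MD_RECOVERY_RUNNING condition: a woken waiter would re-check, still see the bit set, and sleep with no further wake-up coming. The patch therefore wakes the queue where the bit is cleared: md_start_sync()'s failure path, md_check_recovery()'s not_running path, and the end of md_reap_sync_thread() once the recovery bits have been dropped. The ordering requirement in miniature (sketch, not literal md.c code):

	/* Broken for a waiter keyed on !MD_RECOVERY_RUNNING: */
	wake_up(&resync_wait);			/* waiter re-checks, bit still set */
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);	/* no one wakes it now */

	/* Required ordering: */
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	wake_up(&resync_wait);			/* waiter's re-check now succeeds */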