mm: vmscan: check if reclaim should really abort even if compaction_ready() is true...
[firefly-linux-kernel-4.4.55.git] / mm / memory-failure.c
index eac0ba5614912e57c7b83b22382bc1165138ef2a..6496748df2140025b7cc95b81fda416e14b4ed86 100644 (file)
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        struct task_struct *tsk;
        struct anon_vma *av;
 
-       read_lock(&tasklist_lock);
        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
-               goto out;
+               return;
+
+       read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
 
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       page_unlock_anon_vma(av);
-out:
        read_unlock(&tasklist_lock);
+       page_unlock_anon_vma(av);
 }
 
 /*
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        struct prio_tree_iter iter;
        struct address_space *mapping = page->mapping;
 
-       /*
-        * A note on the locking order between the two locks.
-        * We don't rely on this particular order.
-        * If you have some other code that needs a different order
-        * feel free to switch them around. Or add a reverse link
-        * from mm_struct to task_struct, then this could be all
-        * done without taking tasklist_lock and looping over all tasks.
-        */
-
-       read_lock(&tasklist_lock);
        mutex_lock(&mapping->i_mmap_mutex);
+       read_lock(&tasklist_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       mutex_unlock(&mapping->i_mmap_mutex);
        read_unlock(&tasklist_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*
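
The hunks above in collect_procs_anon() and collect_procs_file() reverse the lock ordering: in this tree the anon_vma lock taken by page_lock_anon_vma() and mapping->i_mmap_mutex are sleeping mutexes, so they must be acquired before (and released after) the tasklist_lock rwlock, a spinning lock under which sleeping is not allowed. The removed comment claiming the order between the two locks was arbitrary no longer holds for the same reason, and collect_procs_anon() now returns early when the page is no longer mapped, before tasklist_lock is taken at all. A minimal sketch of the resulting ordering rule, using illustrative demo_* names rather than the kernel's own locks:

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Illustrative stand-ins, not the kernel's symbols. */
static DEFINE_MUTEX(demo_mutex);	/* sleeping lock, like i_mmap_mutex  */
static DEFINE_RWLOCK(demo_rwlock);	/* spinning lock, like tasklist_lock */

static void demo_collect(void)
{
	/*
	 * Sleeping lock first: mutex_lock() may sleep, which is not
	 * permitted once a spinning rwlock is read-held.
	 */
	mutex_lock(&demo_mutex);
	read_lock(&demo_rwlock);

	/* ... walk the task list under both locks ... */

	/* Drop the locks in the reverse order of acquisition. */
	read_unlock(&demo_rwlock);
	mutex_unlock(&demo_mutex);
}
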
@@ -1473,7 +1464,7 @@ int soft_offline_page(struct page *page, int flags)
                                            page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
-                                                               0, true);
+                                                       0, MIGRATE_SYNC);
                if (ret) {
                        putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
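
The last hunk tracks an API change in migrate_pages(): the final "bool sync" argument becomes an enum migrate_mode, so the old "true" (fully synchronous migration) maps to MIGRATE_SYNC. A rough sketch of the mode enum as introduced around this kernel version in include/linux/migrate_mode.h; the comments are paraphrased, not verbatim:

/* Sketch of the migration modes; soft offline wants the strongest one. */
enum migrate_mode {
	MIGRATE_ASYNC,		/* never block */
	MIGRATE_SYNC_LIGHT,	/* may block, but not on page writeback */
	MIGRATE_SYNC,		/* may block, including waiting on writeback */
};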