nfs: don't call blocking operations while !TASK_RUNNING
[firefly-linux-kernel-4.4.55.git] / mm / memory.c
index 59f450c5c0a3d8f87577021805f2f61a651ab10d..8b2d75f61b324ef5795ccd61e62c64952b6ff84e 100644
@@ -834,20 +834,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       if (swap_duplicate(entry) < 0)
-                               return entry.val;
-
-                       /* make sure dst_mm is on swapoff's mmlist. */
-                       if (unlikely(list_empty(&dst_mm->mmlist))) {
-                               spin_lock(&mmlist_lock);
-                               if (list_empty(&dst_mm->mmlist))
-                                       list_add(&dst_mm->mmlist,
-                                                &src_mm->mmlist);
-                               spin_unlock(&mmlist_lock);
-                       }
-                       if (likely(!non_swap_entry(entry)))
+                       if (likely(!non_swap_entry(entry))) {
+                               if (swap_duplicate(entry) < 0)
+                                       return entry.val;
+
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
                                rss[MM_SWAPENTS]++;
-                       else if (is_migration_entry(entry)) {
+                       } else if (is_migration_entry(entry)) {
                                page = migration_entry_to_page(entry);
 
                                if (PageAnon(page))
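
Note on this hunk: it narrows fork's swap bookkeeping in copy_one_pte() to genuine swap entries. Previously swap_duplicate() and the mmlist insertion ran for every swap-format pte, including migration entries; bumping a swap-map count for a migration entry hits an unrelated swap slot, and the stale count could leave swapoff spinning forever. This appears to be the upstream fix "mm: fix swapoff hang after page migration and fork". Because the rendered diff is hard to follow, here is the post-change logic reconstructed from the hunk (a sketch of this branch only, not the whole function):

        if (likely(!non_swap_entry(entry))) {
                /*
                 * Genuine swap entry: take a swap-map reference for
                 * the child; on failure the caller adds a swap count
                 * continuation and retries this pte.
                 */
                if (swap_duplicate(entry) < 0)
                        return entry.val;

                /* make sure dst_mm is on swapoff's mmlist. */
                if (unlikely(list_empty(&dst_mm->mmlist))) {
                        spin_lock(&mmlist_lock);
                        if (list_empty(&dst_mm->mmlist))
                                list_add(&dst_mm->mmlist,
                                         &src_mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                rss[MM_SWAPENTS]++;
        } else if (is_migration_entry(entry)) {
                /* Migration entry: no swap reference to take. */
                page = migration_entry_to_page(entry);
                ...
        }
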
@@ -3200,7 +3200,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -3209,7 +3209,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
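
Note on the two hunks above: both make the same one-line fix, once per growth direction. check_stack_guard_page() computed the result of expand_downwards()/expand_upwards() and threw it away, so a failed guard-page expansion (e.g. -ENOMEM against the stack rlimit) was treated as success and the fault carried on against a stack that never grew. Returning the error lets the caller fail the fault instead; this appears to correspond to the upstream change "mm: propagate error from stack expansion even for guard page". For context, a sketch of the caller's side as it looks in do_anonymous_page() of this era (assumed, not shown in this diff):

        /* Check if we need to add a guard page to the stack; with the
         * fix above, a failed expansion now surfaces as SIGBUS. */
        if (check_stack_guard_page(vma, address) < 0)
                return VM_FAULT_SIGBUS;
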
@@ -3860,12 +3860,21 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * space.  Kernel faults are handled more gracefully.
         */
        if (flags & FAULT_FLAG_USER)
-               mem_cgroup_enable_oom();
+               mem_cgroup_oom_enable();
 
        ret = __handle_mm_fault(mm, vma, address, flags);
 
-       if (flags & FAULT_FLAG_USER)
-               mem_cgroup_disable_oom();
+       if (flags & FAULT_FLAG_USER) {
+               mem_cgroup_oom_disable();
+               /*
+                * The task may have entered a memcg OOM situation but
+                * if the allocation error was handled gracefully (no
+                * VM_FAULT_OOM), there is no need to kill anything.
+                * Just clean up the OOM state peacefully.
+                */
+               if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+                       mem_cgroup_oom_synchronize(false);
+       }
 
        return ret;
 }
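
Note on this hunk: besides renaming the memcg OOM toggles (mem_cgroup_enable_oom()/mem_cgroup_disable_oom() become mem_cgroup_oom_enable()/mem_cgroup_oom_disable()), it adds cleanup for the non-fatal case: if the task entered a memcg OOM context during the fault but the fault still completed without VM_FAULT_OOM, mem_cgroup_oom_synchronize(false) merely clears the per-task OOM state instead of killing anything. This appears to match upstream "mm: memcg: handle non-error OOM situations more gracefully". The complementary path, sketched below on the assumption that this tree follows upstream mm/oom_kill.c, is where the 'true' argument does let the memcg OOM killer act:

        /*
         * Sketch of the fallback for faults that do end in VM_FAULT_OOM:
         * the arch fault handler calls pagefault_out_of_memory(), which
         * gives the memcg OOM handler a chance to kill a task before
         * falling back to the global OOM killer.
         */
        void pagefault_out_of_memory(void)
        {
                if (mem_cgroup_oom_synchronize(true))
                        return;         /* handled as a memcg OOM */
                /* ... global OOM handling ... */
        }
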