X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=mm%2Fmemory.c;h=53c1da0d04a68d02bd1fd9eba8c925ceda59fec8;hb=3332f84e0b969bd699d783e00e72a9110929eeed;hp=6ab19dd4a1990f3f463c1db3eee4f19ce4f5a3de;hpb=add810a1c58ba9edb6076ababe5861f36c049217;p=firefly-linux-kernel-4.4.55.git

diff --git a/mm/memory.c b/mm/memory.c
index 6ab19dd4a199..53c1da0d04a6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1282,10 +1282,20 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				return i ? : -EFAULT;
 			}
 			if (pages) {
-				struct page *page = vm_normal_page(gate_vma, start, *pte);
+				struct page *page;
+
+				page = vm_normal_page(gate_vma, start, *pte);
+				if (!page) {
+					if (!(gup_flags & FOLL_DUMP) &&
+					    is_zero_pfn(pte_pfn(*pte)))
+						page = pte_page(*pte);
+					else {
+						pte_unmap(pte);
+						return i ? : -EFAULT;
+					}
+				}
 				pages[i] = page;
-				if (page)
-					get_page(page);
+				get_page(page);
 			}
 			pte_unmap(pte);
 			if (vmas)
@@ -2514,7 +2524,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_HWPOISON;
 		} else {
 			print_bad_pte(vma, address, orig_pte, NULL);
-			ret = VM_FAULT_OOM;
+			ret = VM_FAULT_SIGBUS;
 		}
 		goto out;
 	}
@@ -2619,6 +2629,40 @@ out_release:
 	return ret;
 }
 
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+	address &= PAGE_MASK;
+	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+		expand_stack(vma, address - PAGE_SIZE);
+	}
+	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+		struct vm_area_struct *next = vma->vm_next;
+
+		/* As VM_GROWSDOWN but s/below/above/ */
+		if (next && next->vm_start == address + PAGE_SIZE)
+			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+		expand_upwards(vma, address + PAGE_SIZE);
+	}
+	return 0;
+}
+
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2632,19 +2676,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
+	pte_unmap(page_table);
+
+	/* Check if we need to add a guard page to the stack */
+	if (check_stack_guard_page(vma, address) < 0)
+		return VM_FAULT_SIGBUS;
+
+	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
 		goto setpte;
 	}
 
 	/* Allocate our own private page. */
-	pte_unmap(page_table);
-
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -2910,7 +2958,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * Page table corrupted: show pte and kill process.
 		 */
 		print_bad_pte(vma, address, orig_pte, NULL);
-		return VM_FAULT_OOM;
+		return VM_FAULT_SIGBUS;
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
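
A minimal userspace sketch of the guard-page rule that check_stack_guard_page() adds above (VM_GROWSDOWN side only): a fault on the lowest stack page is refused when a different mapping abuts the stack, and allowed when there is free space (or a split piece of the same stack) below. This is an illustration, not part of the patch; the simplified struct vma, the guard_check() helper, and the example addresses are assumptions made for this sketch, and the real kernel code would call expand_stack() where noted.

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define VM_GROWSDOWN 0x1
#define ENOMEM       12

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev;
};

/* Mirrors the VM_GROWSDOWN half of check_stack_guard_page() in the patch. */
static int guard_check(const struct vma *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		const struct vma *prev = vma->vm_prev;

		/*
		 * An abutting mapping below is only OK if it is the same,
		 * split stack mapping; anything else keeps the guard page.
		 */
		if (prev && prev->vm_end == address)
			return (prev->vm_flags & VM_GROWSDOWN) ? 0 : -ENOMEM;

		/* The kernel code would expand_stack() by one page here. */
	}
	return 0;
}

int main(void)
{
	struct vma lib   = { 0x10000, 0x11000, 0, NULL };            /* plain mapping     */
	struct vma stack = { 0x11000, 0x20000, VM_GROWSDOWN, &lib }; /* grows-down stack  */

	/* Fault on the lowest stack page with 'lib' directly below: refused. */
	printf("abutting mapping below -> %d (expect -12)\n",
	       guard_check(&stack, 0x11000));

	/* Same fault with free space below the stack: allowed to expand. */
	stack.vm_prev = NULL;
	printf("gap below the stack    -> %d (expect 0)\n",
	       guard_check(&stack, 0x11000));
	return 0;
}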