mm: vmscan: check if reclaim should really abort even if compaction_ready() is true...
diff --git a/mm/nommu.c b/mm/nommu.c
index c4c542c736a962774f0770eef0d50c419b19a2cb..5ff9b35883ee0a3a07310dda39dad20fc758dc8a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -680,9 +680,9 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp, *next;
+       struct vm_area_struct *pvma, *prev;
        struct address_space *mapping;
-       struct rb_node **p, *parent;
+       struct rb_node **p, *parent, *rb_prev;
 
        kenter(",%p", vma);
 
@@ -697,13 +697,15 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        /* add the VMA to the tree */
-       parent = NULL;
+       parent = rb_prev = NULL;
        p = &mm->mm_rb.rb_node;
        while (*p) {
                parent = *p;
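
[note] This hunk, together with the matching hunks in delete_vma_from_mm() and
nommu_shrink_inode_mappings() below, starts taking mapping->i_mmap_mutex around
updates of the per-file VMA prio tree; as the removed lines show, the nommu
path previously modified i_mmap with no mapping-level lock held at all.
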
@@ -713,17 +715,20 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                 * (the latter is necessary as we may get identical VMAs) */
                if (vma->vm_start < pvma->vm_start)
                        p = &(*p)->rb_left;
-               else if (vma->vm_start > pvma->vm_start)
+               else if (vma->vm_start > pvma->vm_start) {
+                       rb_prev = parent;
                        p = &(*p)->rb_right;
-               else if (vma->vm_end < pvma->vm_end)
+               } else if (vma->vm_end < pvma->vm_end)
                        p = &(*p)->rb_left;
-               else if (vma->vm_end > pvma->vm_end)
+               else if (vma->vm_end > pvma->vm_end) {
+                       rb_prev = parent;
                        p = &(*p)->rb_right;
-               else if (vma < pvma)
+               } else if (vma < pvma)
                        p = &(*p)->rb_left;
-               else if (vma > pvma)
+               else if (vma > pvma) {
+                       rb_prev = parent;
                        p = &(*p)->rb_right;
-               else
+               } else
                        BUG();
        }
 
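
[note] The point of rb_prev: while descending to the insertion slot, every node
we pass on the right sorts below the new VMA, and the last such node is the new
node's in-order predecessor, so the list insertion below becomes O(1) instead
of the old O(n) rescan of mm->mmap. A minimal userspace sketch of the same
invariant on a plain, unbalanced binary search tree (illustration only, not
kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { long key; struct node *left, *right; };

	/* insert key, reporting the in-order predecessor met on the way down */
	static void insert(struct node **root, long key, struct node **prev)
	{
		struct node **p = root, *parent = NULL, *n;

		*prev = NULL;
		while (*p) {
			parent = *p;
			if (key < parent->key) {
				p = &parent->left;
			} else {
				*prev = parent;	/* went right: parent precedes key */
				p = &parent->right;
			}
		}
		n = calloc(1, sizeof(*n));
		n->key = key;
		*p = n;
	}

	int main(void)
	{
		struct node *root = NULL, *prev;
		long keys[] = { 40, 10, 30, 20 };

		for (int i = 0; i < 4; i++) {
			insert(&root, keys[i], &prev);
			printf("inserted %ld, predecessor %ld\n",
			       keys[i], prev ? prev->key : -1L);
		}
		return 0;
	}
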
@@ -731,20 +736,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 
        /* add VMA to the VMA list also */
-       for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
-               if (pvma->vm_start > vma->vm_start)
-                       break;
-               if (pvma->vm_start < vma->vm_start)
-                       continue;
-               if (pvma->vm_end < vma->vm_end)
-                       break;
-       }
+       prev = NULL;
+       if (rb_prev)
+               prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-       next = *pp;
-       *pp = vma;
-       vma->vm_next = next;
-       if (next)
-               next->vm_prev = vma;
+       __vma_link_list(mm, vma, prev, parent);
 }
 
 /*
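
[note] __vma_link_list() lives in mm/util.c and is shared with the MMU build.
The subtlety is the rb_parent argument: if the descent never went right
(rb_prev == NULL), the new node was linked below the leftmost node of the
tree, so rb_parent is exactly the in-order successor and supplies the new list
head's vm_next. From memory it looks roughly like this; check mm/util.c in
your tree:

	void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
			     struct vm_area_struct *prev, struct rb_node *rb_parent)
	{
		struct vm_area_struct *next;

		vma->vm_prev = prev;
		if (prev) {
			next = prev->vm_next;
			prev->vm_next = vma;
		} else {
			mm->mmap = vma;
			if (rb_parent)
				next = rb_entry(rb_parent,
						struct vm_area_struct, vm_rb);
			else
				next = NULL;
		}
		vma->vm_next = next;
		if (next)
			next->vm_prev = vma;
	}
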
@@ -752,7 +748,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
  */
 static void delete_vma_from_mm(struct vm_area_struct *vma)
 {
-       struct vm_area_struct **pp;
        struct address_space *mapping;
        struct mm_struct *mm = vma->vm_mm;
 
@@ -768,21 +763,23 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        /* remove from the MM's tree and list */
        rb_erase(&vma->vm_rb, &mm->mm_rb);
-       for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
-               if (*pp == vma) {
-                       *pp = vma->vm_next;
-                       break;
-               }
-       }
 
-       vma->vm_mm = NULL;
+       if (vma->vm_prev)
+               vma->vm_prev->vm_next = vma->vm_next;
+       else
+               mm->mmap = vma->vm_next;
+
+       if (vma->vm_next)
+               vma->vm_next->vm_prev = vma->vm_prev;
 }
 
 /*
@@ -809,17 +806,15 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma;
-       struct rb_node *n = mm->mm_rb.rb_node;
 
        /* check the cache first */
        vma = mm->mmap_cache;
        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
                return vma;
 
-       /* trawl the tree (there may be multiple mappings in which addr
+       /* trawl the list (there may be multiple mappings in which addr
         * resides) */
-       for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
-               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > addr)
                        return NULL;
                if (vma->vm_end > addr) {
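
[note] This walk (and the ones in find_vma_exact() and do_munmap() below) is
only correct because add_vma_to_mm() now keeps mm->mmap sorted by address: the
early return NULL once vma->vm_start > addr relies on that ordering.
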
@@ -859,7 +854,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                             unsigned long len)
 {
        struct vm_area_struct *vma;
-       struct rb_node *n = mm->mm_rb.rb_node;
        unsigned long end = addr + len;
 
        /* check the cache first */
@@ -867,10 +861,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
        if (vma && vma->vm_start == addr && vma->vm_end == end)
                return vma;
 
-       /* trawl the tree (there may be multiple mappings in which addr
+       /* trawl the list (there may be multiple mappings in which addr
         * resides) */
-       for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
-               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start < addr)
                        continue;
                if (vma->vm_start > addr)
@@ -1133,7 +1126,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
                           unsigned long capabilities)
 {
        struct page *pages;
-       unsigned long total, point, n, rlen;
+       unsigned long total, point, n;
        void *base;
        int ret, order;
 
@@ -1157,13 +1150,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
                 * make a private copy of the data and map that instead */
        }
 
-       rlen = PAGE_ALIGN(len);
 
        /* allocate some memory to hold the mapping
         * - note that this may not return a page-aligned address if the object
         *   we're allocating is smaller than a page
         */
-       order = get_order(rlen);
+       order = get_order(len);
        kdebug("alloc order %d for %lx", order, len);
 
        pages = alloc_pages(GFP_KERNEL, order);
@@ -1173,7 +1165,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
        total = 1 << order;
        atomic_long_add(total, &mmap_pages_allocated);
 
-       point = rlen >> PAGE_SHIFT;
+       point = len >> PAGE_SHIFT;
 
        /* we allocated a power-of-2 sized page set, so we may want to trim off
         * the excess */
@@ -1195,7 +1187,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
        base = page_address(pages);
        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
        region->vm_start = (unsigned long) base;
-       region->vm_end   = region->vm_start + rlen;
+       region->vm_end   = region->vm_start + len;
        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 
        vma->vm_start = region->vm_start;
@@ -1211,22 +1203,22 @@ static int do_mmap_private(struct vm_area_struct *vma,
 
                old_fs = get_fs();
                set_fs(KERNEL_DS);
-               ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
+               ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
                set_fs(old_fs);
 
                if (ret < 0)
                        goto error_free;
 
                /* clear the last little bit */
-               if (ret < rlen)
-                       memset(base + ret, 0, rlen - ret);
+               if (ret < len)
+                       memset(base + ret, 0, len - ret);
 
        }
 
        return 0;
 
 error_free:
-       free_page_series(region->vm_start, region->vm_end);
+       free_page_series(region->vm_start, region->vm_top);
        region->vm_start = vma->vm_start = 0;
        region->vm_end   = vma->vm_end = 0;
        region->vm_top   = 0;
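
[note] The error path previously freed only [vm_start, vm_end), leaking
whatever tail of the power-of-2 allocation had not been trimmed; vm_top marks
the true end of the allocation. A worked example, assuming 4 KiB pages and the
excess left untrimmed (sysctl_nr_trim_pages threshold not reached):

	len    = 3 * PAGE_SIZE;		/* 12 KiB requested  */
	order  = get_order(len);	/* = 2               */
	total  = 1 << order;		/* 4 pages allocated */
	region->vm_end = region->vm_start + len;                   /* +12 KiB */
	region->vm_top = region->vm_start + (total << PAGE_SHIFT); /* +16 KiB */
	/* freeing only up to vm_end would leak the fourth page */
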
@@ -1235,7 +1227,7 @@ error_free:
 enomem:
        printk("Allocation of length %lu from process %d (%s) failed\n",
               len, current->pid, current->comm);
-       show_free_areas();
+       show_free_areas(0);
        return -ENOMEM;
 }
 
@@ -1268,6 +1260,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 
        /* we ignore the address hint */
        addr = 0;
+       len = PAGE_ALIGN(len);
 
        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
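
[note] Aligning len once on entry to do_mmap_pgoff() is what allowed
do_mmap_private() above to drop its private rlen copy. PAGE_ALIGN() rounds up
to the next page boundary; it effectively expands to:

	#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

	/* with 4 KiB pages:
	 *	PAGE_ALIGN(1)    == 4096
	 *	PAGE_ALIGN(4096) == 4096
	 *	PAGE_ALIGN(4097) == 8192
	 */
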
@@ -1385,15 +1378,15 @@ unsigned long do_mmap_pgoff(struct file *file,
                if (capabilities & BDI_CAP_MAP_DIRECT) {
                        addr = file->f_op->get_unmapped_area(file, addr, len,
                                                             pgoff, flags);
-                       if (IS_ERR((void *) addr)) {
+                       if (IS_ERR_VALUE(addr)) {
                                ret = addr;
-                               if (ret != (unsigned long) -ENOSYS)
+                               if (ret != -ENOSYS)
                                        goto error_just_free;
 
                                /* the driver refused to tell us where to site
                                 * the mapping so we'll have to attempt to copy
                                 * it */
-                               ret = (unsigned long) -ENODEV;
+                               ret = -ENODEV;
                                if (!(capabilities & BDI_CAP_MAP_COPY))
                                        goto error_just_free;
 
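
[note] IS_ERR_VALUE() says what is meant and drops the casts; in kernels of
this era it is defined in include/linux/err.h as:

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	unlikely((x) >= (unsigned long)-MAX_ERRNO)

so any value in the last 4095 bytes of the address space is treated as an
errno, which is exactly what ->get_unmapped_area() returns on failure.
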
@@ -1468,14 +1461,14 @@ error_getting_vma:
        printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
               " from process %d failed\n",
               len, current->pid);
-       show_free_areas();
+       show_free_areas(0);
        return -ENOMEM;
 
 error_getting_region:
        printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
               " from process %d failed\n",
               len, current->pid);
-       show_free_areas();
+       show_free_areas(0);
        return -ENOMEM;
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
@@ -1644,15 +1637,17 @@ static int shrink_vma(struct mm_struct *mm,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
        struct vm_area_struct *vma;
-       struct rb_node *rb;
-       unsigned long end = start + len;
+       unsigned long end;
        int ret;
 
        kenter(",%lx,%zx", start, len);
 
+       len = PAGE_ALIGN(len);
        if (len == 0)
                return -EINVAL;
 
+       end = start + len;
+
        /* find the first potentially overlapping VMA */
        vma = find_vma(mm, start);
        if (!vma) {
@@ -1677,9 +1672,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                        }
                        if (end == vma->vm_end)
                                goto erase_whole_vma;
-                       rb = rb_next(&vma->vm_rb);
-                       vma = rb_entry(rb, struct vm_area_struct, vm_rb);
-               } while (rb);
+                       vma = vma->vm_next;
+               } while (vma);
                kleave(" = -EINVAL [split file]");
                return -EINVAL;
        } else {
@@ -1773,6 +1767,8 @@ unsigned long do_mremap(unsigned long addr,
        struct vm_area_struct *vma;
 
        /* insanity checks first */
+       old_len = PAGE_ALIGN(old_len);
+       new_len = PAGE_ALIGN(new_len);
        if (old_len == 0 || new_len == 0)
                return (unsigned long) -EINVAL;
 
@@ -1819,10 +1815,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
        return NULL;
 }
 
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-               unsigned long to, unsigned long size, pgprot_t prot)
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-       vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+       if (addr != (pfn << PAGE_SHIFT))
+               return -EINVAL;
+
+       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
        return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
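
[note] The old stub overwrote vma->vm_start from vm_pgoff, corrupting a VMA
the caller had already set up, and its from/to naming no longer matched the
MMU prototype. On nommu a PFN mapping can only succeed if it is an identity
mapping of the physical region, which the new check enforces. A typical driver
->mmap (mydrv_mmap is a made-up name, sketch only) then works unchanged on
both builds:

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* on nommu this succeeds iff
		 * vma->vm_start == vma->vm_pgoff << PAGE_SHIFT */
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
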
@@ -2064,6 +2063,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        down_write(&nommu_region_sem);
+       mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
        /* search for VMAs that fall within the dead zone */
        vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2071,6 +2071,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                /* found one - only interested if it's shared out of the page
                 * cache */
                if (vma->vm_flags & VM_SHARED) {
+                       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
                        up_write(&nommu_region_sem);
                        return -ETXTBSY; /* not quite true, but near enough */
                }
@@ -2098,6 +2099,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                }
        }
 
+       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
        up_write(&nommu_region_sem);
        return 0;
 }
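
[note] The unlock ordering in the -ETXTBSY path above is the reverse of the
locking order: i_mmap_mutex is dropped before nommu_region_sem, keeping the
mutex strictly nested inside the region semaphore.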