pinctrl: pfc: r8a7790: add i2c0 muxing
[firefly-linux-kernel-4.4.55.git] / mm / memory.c
index 4eefb7e3152170306fddf100d2457e9370b1a327..d0f0bef3be488af9eb9406cc5d28272093abb5a6 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/migrate.h>
 #include <linux/string.h>
 #include <linux/dma-debug.h>
+#include <linux/debugfs.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -2781,7 +2782,7 @@ reuse:
                 */
                if (!page_mkwrite) {
                        wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page, page_mkwrite);
+                       set_page_dirty_balance(dirty_page);
                        /* file_update_time outside page_lock */
                        if (vma->vm_file)
                                file_update_time(vma->vm_file);
@@ -2827,7 +2828,7 @@ gotten:
        }
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+       if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        mmun_start  = address & PAGE_MASK;
@@ -3280,7 +3281,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         */
        __SetPageUptodate(page);
 
-       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+       if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
@@ -3382,8 +3383,63 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 }
 
 #define FAULT_AROUND_ORDER 4
-#define FAULT_AROUND_PAGES (1UL << FAULT_AROUND_ORDER)
-#define FAULT_AROUND_MASK ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1)
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Runtime-tunable fault-around order: do_fault_around() maps up to
+ * 1 << fault_around_order pages around a fault.  Exposed read/write
+ * via the "fault_around_order" debugfs file registered below.
+ */
+static unsigned int fault_around_order = FAULT_AROUND_ORDER;
+
+/* debugfs read handler: report the current fault-around order. */
+static int fault_around_order_get(void *data, u64 *val)
+{
+       *val = fault_around_order;
+       return 0;
+}
+
+/*
+ * debugfs write handler: update the fault-around order.
+ *
+ * Reject values whose page count would not fit in one page table.
+ * The shift must be guarded: "1UL << val" is undefined behavior for
+ * val >= BITS_PER_LONG, and val arrives straight from userspace.
+ */
+static int fault_around_order_set(void *data, u64 val)
+{
+       BUILD_BUG_ON((1UL << FAULT_AROUND_ORDER) > PTRS_PER_PTE);
+       if (val >= BITS_PER_LONG || 1UL << val > PTRS_PER_PTE)
+               return -EINVAL;
+       fault_around_order = val;
+       return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fault_around_order_fops,
+               fault_around_order_get, fault_around_order_set, "%llu\n");
+
+/*
+ * Register the "fault_around_order" knob at the debugfs root.
+ * Failure is non-fatal: fault-around still works with the boot-time
+ * value, so we only warn.
+ */
+static int __init fault_around_debugfs(void)
+{
+       void *ret;
+
+       ret = debugfs_create_file("fault_around_order", 0644, NULL, NULL,
+                       &fault_around_order_fops);
+       if (!ret)
+               pr_warn("Failed to create fault_around_order in debugfs\n");
+       return 0;
+}
+late_initcall(fault_around_debugfs);
+
+/* Number of pages to fault around (debugfs-tunable variant). */
+static inline unsigned long fault_around_pages(void)
+{
+       return 1UL << fault_around_order;
+}
+
+/*
+ * Mask that rounds an address down to the start of the current
+ * fault-around window (fault_around_pages() pages, page-aligned).
+ */
+static inline unsigned long fault_around_mask(void)
+{
+       return ~((1UL << (PAGE_SHIFT + fault_around_order)) - 1);
+}
+#else
+/*
+ * !CONFIG_DEBUG_FS: fixed-size fault-around window of
+ * 1 << FAULT_AROUND_ORDER pages, resolved at compile time.
+ */
+static inline unsigned long fault_around_pages(void)
+{
+       unsigned long nr_pages;
+
+       nr_pages = 1UL << FAULT_AROUND_ORDER;
+       /* The fault-around window must fit within one page table. */
+       BUILD_BUG_ON(nr_pages > PTRS_PER_PTE);
+       return nr_pages;
+}
+
+/* Mask rounding an address down to the start of the fault-around window. */
+static inline unsigned long fault_around_mask(void)
+{
+       return ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1);
+}
+#endif
 
 static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, pgoff_t pgoff, unsigned int flags)
@@ -3393,21 +3449,19 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
        struct vm_fault vmf;
        int off;
 
-       BUILD_BUG_ON(FAULT_AROUND_PAGES > PTRS_PER_PTE);
-
-       start_addr = max(address & FAULT_AROUND_MASK, vma->vm_start);
+       start_addr = max(address & fault_around_mask(), vma->vm_start);
        off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        pte -= off;
        pgoff -= off;
 
        /*
         *  max_pgoff is either end of page table or end of vma
-        *  or FAULT_AROUND_PAGES from pgoff, depending what is neast.
+        *  or fault_around_pages() from pgoff, depending on what is nearest.
         */
        max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
                PTRS_PER_PTE - 1;
        max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
-                       pgoff + FAULT_AROUND_PAGES - 1);
+                       pgoff + fault_around_pages() - 1);
 
        /* Check if it makes any sense to call ->map_pages */
        while (!pte_none(*pte)) {
@@ -3483,7 +3537,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!new_page)
                return VM_FAULT_OOM;
 
-       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) {
+       if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) {
                page_cache_release(new_page);
                return VM_FAULT_OOM;
        }