mm/migrate: correct failure handling if !hugepage_migration_support()
[firefly-linux-kernel-4.4.55.git] / fs / proc / meminfo.c
index 59d85d608898354169520fb26209789b27888978..24270eceddbff0e4a7d0ca1e4aad8a669510771e 100644 (file)
@@ -1,8 +1,8 @@
 #include <linux/fs.h>
-#include <linux/hugetlb.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
@@ -24,10 +24,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 {
        struct sysinfo i;
        unsigned long committed;
-       unsigned long allowed;
        struct vmalloc_info vmi;
        long cached;
+       long available;
+       unsigned long pagecache;
+       unsigned long wmark_low = 0;
        unsigned long pages[NR_LRU_LISTS];
+       struct zone *zone;
        int lru;
 
 /*
@@ -37,8 +40,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        si_meminfo(&i);
        si_swapinfo(&i);
        committed = percpu_counter_read_positive(&vm_committed_as);
-       allowed = ((totalram_pages - hugetlb_total_pages())
-               * sysctl_overcommit_ratio / 100) + total_swap_pages;
 
        cached = global_page_state(NR_FILE_PAGES) -
                        total_swapcache_pages() - i.bufferram;
@@ -50,12 +51,44 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
                pages[lru] = global_page_state(NR_LRU_BASE + lru);
 
+       for_each_zone(zone)
+               wmark_low += zone->watermark[WMARK_LOW];
+
+       /*
+        * Estimate the amount of memory available for userspace allocations,
+        * without causing swapping.
+        *
+        * Free memory cannot be taken below the low watermark, before the
+        * system starts swapping.
+        */
+       available = i.freeram - wmark_low;
+
+       /*
+        * Not all the page cache can be freed, otherwise the system will
+        * start swapping. Assume at least half of the page cache, or the
+        * low watermark worth of cache, needs to stay.
+        */
+       pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
+       pagecache -= min(pagecache / 2, wmark_low);
+       available += pagecache;
+
+       /*
+        * Part of the reclaimable slab consists of items that are in use,
+        * and cannot be freed. Cap this estimate at the low watermark.
+        */
+       available += global_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+
+       if (available < 0)
+               available = 0;
+
        /*
         * Tagged format, for easy grepping and expansion.
         */
        seq_printf(m,
                "MemTotal:       %8lu kB\n"
                "MemFree:        %8lu kB\n"
+               "MemAvailable:   %8lu kB\n"
                "Buffers:        %8lu kB\n"
                "Cached:         %8lu kB\n"
                "SwapCached:     %8lu kB\n"
@@ -108,6 +141,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                ,
                K(i.totalram),
                K(i.freeram),
+               K(available),
                K(i.bufferram),
                K(cached),
                K(total_swapcache_pages()),
@@ -147,7 +181,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(global_page_state(NR_UNSTABLE_NFS)),
                K(global_page_state(NR_BOUNCE)),
                K(global_page_state(NR_WRITEBACK_TEMP)),
-               K(allowed),
+               K(vm_commit_limit()),
                K(committed),
                (unsigned long)VMALLOC_TOTAL >> 10,
                vmi.used >> 10,