mm: move page->mem_cgroup bad page handling into generic code
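The memcg pointer now lives directly in struct page, so the allocator's
bad-page checks no longer need the mem_cgroup_bad_page_check() callback
or the page_cgroup lookup behind it; a plain pointer test in generic
code is enough. A minimal sketch of the pattern this patch adds to
free_pages_check() and check_new_page(), assuming the uncharge path
clears page->mem_cgroup once the last charge is dropped:

	#ifdef CONFIG_MEMCG
		/*
		 * Sketch of the new generic check: a page that reaches
		 * the allocator with page->mem_cgroup still set was
		 * freed (or is being handed out) without having been
		 * uncharged, so report it as a bad page rather than
		 * silently leaking the charge.
		 */
		if (unlikely(page->mem_cgroup))
			bad_reason = "page still charged to cgroup";
	#endif

With the callback gone, the <linux/page_cgroup.h> include and the
pgdat_page_cgroup_init() call in free_area_init_core() have no users
left in this file and are dropped as well.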
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e32121fa2ba950a6e894ddd94fe22924e60659de..a7198c065999c564f41be65ba7e66899913c375b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,7 +48,6 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
-#include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
 #include <linux/compaction.h>
@@ -641,8 +640,10 @@ static inline int free_pages_check(struct page *page)
                bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
                bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
        }
-       if (unlikely(mem_cgroup_bad_page_check(page)))
-               bad_reason = "cgroup check failed";
+#ifdef CONFIG_MEMCG
+       if (unlikely(page->mem_cgroup))
+               bad_reason = "page still charged to cgroup";
+#endif
        if (unlikely(bad_reason)) {
                bad_page(page, bad_reason, bad_flags);
                return 1;
@@ -901,8 +902,10 @@ static inline int check_new_page(struct page *page)
                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
        }
-       if (unlikely(mem_cgroup_bad_page_check(page)))
-               bad_reason = "cgroup check failed";
+#ifdef CONFIG_MEMCG
+       if (unlikely(page->mem_cgroup))
+               bad_reason = "page still charged to cgroup";
+#endif
        if (unlikely(bad_reason)) {
                bad_page(page, bad_reason, bad_flags);
                return 1;
@@ -1739,7 +1742,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
                        unsigned long mark, int classzone_idx, int alloc_flags,
                        long free_pages)
 {
-       /* free_pages my go negative - that's OK */
+       /* free_pages may go negative - that's OK */
        long min = mark;
        int o;
        long free_cma = 0;
@@ -2330,7 +2333,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        int classzone_idx, int migratetype, enum migrate_mode mode,
        int *contended_compaction, bool *deferred_compaction)
 {
-       struct zone *last_compact_zone = NULL;
        unsigned long compact_result;
        struct page *page;
 
@@ -2341,8 +2343,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
                                                nodemask, mode,
                                                contended_compaction,
-                                               alloc_flags, classzone_idx,
-                                               &last_compact_zone);
+                                               alloc_flags, classzone_idx);
        current->flags &= ~PF_MEMALLOC;
 
        switch (compact_result) {
@@ -2361,10 +2362,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
         */
        count_vm_event(COMPACTSTALL);
 
-       /* Page migration frees to the PCP lists but we want merging */
-       drain_pages(get_cpu());
-       put_cpu();
-
        page = get_page_from_freelist(gfp_mask, nodemask,
                        order, zonelist, high_zoneidx,
                        alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2379,14 +2376,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                return page;
        }
 
-       /*
-        * last_compact_zone is where try_to_compact_pages thought allocation
-        * should succeed, so it did not defer compaction. But here we know
-        * that it didn't succeed, so we do the defer.
-        */
-       if (last_compact_zone && mode != MIGRATE_ASYNC)
-               defer_compaction(last_compact_zone, order);
-
        /*
         * It's bad if compaction run occurs and fails. The most likely reason
         * is that pages exist, but not enough to satisfy watermarks.
@@ -4867,7 +4856,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 #endif
        init_waitqueue_head(&pgdat->kswapd_wait);
        init_waitqueue_head(&pgdat->pfmemalloc_wait);
-       pgdat_page_cgroup_init(pgdat);
 
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;