bna: remove useless calls to memset().
[firefly-linux-kernel-4.4.55.git] / mm / page_alloc.c
index 5b74de6702e06587e0d4f36060f810526ff8fbe3..92871579cbee3b8feb12376df03ad2969f942c48 100644
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 
        mt = get_pageblock_migratetype(page);
        if (unlikely(mt != MIGRATE_ISOLATE))
-               __mod_zone_freepage_state(zone, -(1UL << order), mt);
+               __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
 
        if (alloc_order != order)
                expand(zone, page, alloc_order, order,
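
Note on the hunk above: the captured page was found at buddy order `order`, but only `1 << alloc_order` pages actually leave the free state, because expand() hands the remainder of the split buddy back to the free lists without adjusting the per-zone counters (the normal allocation path likewise only debits the requested order). Subtracting `1 << order` therefore made the free-page counters drift low whenever alloc_order < order. A minimal userspace model of that drift; `counter` and `actually_free` are stand-in names, not kernel symbols:

#include <stdio.h>

int main(void)
{
	long counter = 1024;        /* stands in for the zone's NR_FREE_PAGES */
	long actually_free = 1024;  /* what is really left on the free lists  */
	int order = 3;              /* order of the buddy that was found      */
	int alloc_order = 0;        /* order the caller asked to capture      */

	/*
	 * Only 1 << alloc_order pages leave the free state; expand()
	 * returns the rest of the split buddy to the free lists.
	 */
	actually_free -= 1L << alloc_order;

	counter -= 1L << order;            /* old accounting              */
	printf("old:   counter=%ld, actually free=%ld\n", counter, actually_free);

	counter += 1L << order;            /* rewind, then apply the fix  */
	counter -= 1L << alloc_order;      /* fixed accounting            */
	printf("fixed: counter=%ld, actually free=%ld\n", counter, actually_free);
	return 0;
}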
@@ -2416,8 +2416,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                goto nopage;
 
 restart:
-       wake_all_kswapd(order, zonelist, high_zoneidx,
-                                       zone_idx(preferred_zone));
+       if (!(gfp_mask & __GFP_NO_KSWAPD))
+               wake_all_kswapd(order, zonelist, high_zoneidx,
+                                               zone_idx(preferred_zone));
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
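
The restart path regains its __GFP_NO_KSWAPD escape hatch: a caller that sets the flag (opportunistic high-order allocations such as THP, going by the flag's usual users in this era) no longer wakes every kswapd on the zonelist just because it dropped into the slow path. A minimal userspace model of the guard, using a stand-in flag value rather than the kernel's real __GFP_NO_KSWAPD bit:

#include <stdio.h>

#define MODEL_GFP_NO_KSWAPD 0x1000u  /* stand-in value, not the kernel's bit */

static void wake_all_kswapd_model(void)
{
	puts("kswapd woken for every zone up to high_zoneidx");
}

static void slowpath_restart_model(unsigned int gfp_mask)
{
	if (!(gfp_mask & MODEL_GFP_NO_KSWAPD))
		wake_all_kswapd_model();
	else
		puts("caller opted out; background reclaim is left alone");
}

int main(void)
{
	slowpath_restart_model(0);                    /* ordinary allocation     */
	slowpath_restart_model(MODEL_GFP_NO_KSWAPD);  /* THP-style opportunistic */
	return 0;
}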
@@ -2494,7 +2495,7 @@ rebalance:
         * system then fail the allocation instead of entering direct reclaim.
         */
        if ((deferred_compaction || contended_compaction) &&
-           (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+                                               (gfp_mask & __GFP_NO_KSWAPD))
                goto nopage;
 
        /* Try direct reclaim and then allocating */
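
This hunk changes which callers give up when compaction was deferred or contended: the old predicate bailed only for allocations that were __GFP_MOVABLE but not __GFP_REPEAT, while the restored predicate keys off __GFP_NO_KSWAPD, matching the opt-out used for the kswapd wakeup above. A small userspace comparison of the two predicates; the flag values are stand-ins, only the bit logic is the point:

#include <stdio.h>

#define M_MOVABLE    0x1u
#define M_REPEAT     0x2u
#define M_NO_KSWAPD  0x4u

static int old_bail(unsigned int gfp)
{
	/* bail only if movable is set and repeat is clear */
	return (gfp & (M_MOVABLE | M_REPEAT)) == M_MOVABLE;
}

static int new_bail(unsigned int gfp)
{
	/* bail for any caller that asked not to disturb the system */
	return !!(gfp & M_NO_KSWAPD);
}

int main(void)
{
	unsigned int masks[] = {
		M_MOVABLE,                 /* movable, not willing to retry */
		M_MOVABLE | M_REPEAT,      /* movable, willing to retry     */
		M_MOVABLE | M_NO_KSWAPD,   /* opportunistic, kswapd opt-out */
		0,                         /* plain kernel allocation       */
	};

	for (unsigned int i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		printf("mask %#x: old bails=%d, new bails=%d\n",
		       masks[i], old_bail(masks[i]), new_bail(masks[i]));
	return 0;
}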
@@ -4505,7 +4506,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone->zone_pgdat = pgdat;
 
                zone_pcp_init(zone);
-               lruvec_init(&zone->lruvec, zone);
+               lruvec_init(&zone->lruvec);
                if (!size)
                        continue;
 
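
lruvec_init() loses its struct zone * argument here, so free_area_init_core() only hands over the lruvec itself. A rough userspace sketch of what a zone-independent initializer has left to do; the types below are stand-ins, and the kernel's struct lruvec and list_head differ in detail:

#include <stdio.h>
#include <string.h>

enum lru_list {
	LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
	LRU_UNEVICTABLE, NR_LRU_LISTS
};

struct list_head { struct list_head *next, *prev; };
struct lruvec { struct list_head lists[NR_LRU_LISTS]; };

static void init_list_head(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

/* All a zone-independent lruvec_init() has left to do: zero and set up lists. */
static void lruvec_init_model(struct lruvec *lruvec)
{
	memset(lruvec, 0, sizeof(*lruvec));
	for (int lru = 0; lru < NR_LRU_LISTS; lru++)
		init_list_head(&lruvec->lists[lru]);
}

int main(void)
{
	struct lruvec lv;

	lruvec_init_model(&lv);
	printf("inactive anon list empty: %d\n",
	       lv.lists[LRU_INACTIVE_ANON].next == &lv.lists[LRU_INACTIVE_ANON]);
	return 0;
}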
@@ -6098,37 +6099,3 @@ void dump_page(struct page *page)
        dump_page_flags(page->flags);
        mem_cgroup_print_bad_page(page);
 }
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
-       struct zone *z;
-       int i, nid;
-
-       for_each_node_state(nid, N_HIGH_MEMORY) {
-               for (i = 0; i < MAX_NR_ZONES; i++) {
-                       z = NODE_DATA(nid)->node_zones + i;
-                       z->present_pages = 0;
-               }
-       }
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-                               unsigned long end_pfn)
-{
-       struct zone *z;
-       unsigned long zone_start_pfn, zone_end_pfn;
-       int i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               z = NODE_DATA(nid)->node_zones + i;
-               zone_start_pfn = z->zone_start_pfn;
-               zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
-               /* if the two regions intersect */
-               if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
-                       z->present_pages += min(end_pfn, zone_end_pfn) -
-                                           max(start_pfn, zone_start_pfn);
-       }
-}
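
The last hunk deletes reset_zone_present_pages() and fixup_zone_present_pages(), the helpers that rebuilt each zone's present_pages by intersecting a PFN range with the zone's spanned range. The arithmetic they relied on is the usual half-open interval overlap: once the disjoint cases are excluded, the contribution is min(end_pfn, zone_end_pfn) - max(start_pfn, zone_start_pfn). A small standalone check of that formula:

#include <stdio.h>

/*
 * Overlap of two half-open PFN ranges, written the same way as the
 * removed fixup_zone_present_pages(): exclude the disjoint cases,
 * then take min(ends) - max(starts).
 */
static unsigned long overlap(unsigned long zone_start, unsigned long zone_end,
			     unsigned long start, unsigned long end)
{
	if (zone_start >= end || zone_end <= start)
		return 0;
	return (end < zone_end ? end : zone_end) -
	       (start > zone_start ? start : zone_start);
}

int main(void)
{
	/* zone spans PFNs [1000, 5000); a region covers [4000, 6000) */
	printf("pages credited to present_pages: %lu\n",
	       overlap(1000, 5000, 4000, 6000));  /* prints 1000 */
	return 0;
}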