page allocator: use a pre-calculated value instead of num_online_nodes() in fast...
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03a386d24ef237c03305c4517df511360ad5fbd5..0c9f406e3c44ad143713c0dfdc7724736dcf8e35 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -161,7 +161,9 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
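The point of the patch: num_online_nodes() computes a hweight over a nodemask on every call, which is needless work in allocator fast paths, so the count is cached in a plain integer. The cache is only valid if every node online/offline transition refreshes it; the companion change in the same upstream series (in include/linux/nodemask.h) looks roughly like this sketch:

static inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);	/* refresh the cache */
}

static inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);	/* refresh the cache */
}
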
@@ -421,7 +423,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
                return 0;
 
        if (PageBuddy(buddy) && page_order(buddy) == order) {
-               BUG_ON(page_count(buddy) != 0);
+               VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
@@ -456,7 +458,6 @@ static inline void __free_one_page(struct page *page,
                int migratetype)
 {
        unsigned long page_idx;
-       int order_size = 1 << order;
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
@@ -466,10 +467,9 @@ static inline void __free_one_page(struct page *page,
 
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-       VM_BUG_ON(page_idx & (order_size - 1));
+       VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;
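
With order_size gone, the VM_BUG_ON open-codes the alignment mask: a buddy block of 2^order pages must begin at a page index whose low `order` bits are all zero. A standalone userspace illustration of the test (hypothetical code, not part of the patch):

#include <assert.h>

/* Model of the check above: an order-aligned index has its low
 * `order` bits clear, so masking with (1 << order) - 1 yields 0. */
static int order_aligned(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ((1UL << order) - 1)) == 0;
}

int main(void)
{
	assert(order_aligned(0, 3));	/* index 0 can start an order-3 block */
	assert(order_aligned(8, 3));	/* so can index 8: 8 & 7 == 0 */
	assert(!order_aligned(4, 3));	/* index 4 cannot: 4 & 7 == 4 */
	return 0;
}
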
@@ -497,7 +497,7 @@ static inline int free_pages_check(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_count(page) != 0)  |
+               (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
                bad_page(page);
                return 1;
@@ -524,6 +524,8 @@ static void free_pages_bulk(struct zone *zone, int count,
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
        while (count--) {
                struct page *page;
 
@@ -542,6 +544,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }
@@ -642,7 +646,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_count(page) != 0)  |
+               (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
                bad_page(page);
                return 1;
@@ -686,7 +690,6 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }
@@ -826,8 +829,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
-                       __mod_zone_page_state(zone, NR_FREE_PAGES,
-                                                       -(1UL << order));
 
                        if (current_order == pageblock_order)
                                set_pageblock_migratetype(page,
@@ -900,6 +901,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                set_page_private(page, migratetype);
                list = &page->lru;
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
 }
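
Several hunks above move NR_FREE_PAGES accounting out of __free_one_page(), __rmqueue_smallest() and __rmqueue_fallback(), where it ran once per page block, and into callers that already hold zone->lock, which adjust the counter once per batch (count << order pages at a time). A toy userspace model (hypothetical) showing the two schemes are equivalent:

#include <assert.h>

int main(void)
{
	int order = 3, count = 16;	/* 16 order-3 blocks of 8 pages each */
	long per_block = 0;
	int i;

	/* Old scheme: one counter update per block freed. */
	for (i = 0; i < count; i++)
		per_block += 1L << order;

	/* New scheme: a single batched update for the whole operation. */
	long batched = (long)count << order;

	assert(per_block == batched);	/* 16 * 8 == 16 << 3 == 128 */
	return 0;
}
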
@@ -1030,6 +1032,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
+       set_page_private(page, get_pageblock_migratetype(page));
        local_irq_save(flags);
        if (unlikely(clearMlocked))
                free_page_mlock(page);
@@ -1039,7 +1042,6 @@ static void free_hot_cold_page(struct page *page, int cold)
                list_add_tail(&page->lru, &pcp->list);
        else
                list_add(&page->lru, &pcp->list);
-       set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
@@ -1129,6 +1131,7 @@ again:
        } else {
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
@@ -1150,10 +1153,15 @@ failed:
        return NULL;
 }
 
-#define ALLOC_NO_WATERMARKS    0x01 /* don't check watermarks at all */
-#define ALLOC_WMARK_MIN                0x02 /* use pages_min watermark */
-#define ALLOC_WMARK_LOW                0x04 /* use pages_low watermark */
-#define ALLOC_WMARK_HIGH       0x08 /* use pages_high watermark */
+/* The ALLOC_WMARK bits are used as an index into zone->watermark */
+#define ALLOC_WMARK_MIN                WMARK_MIN
+#define ALLOC_WMARK_LOW                WMARK_LOW
+#define ALLOC_WMARK_HIGH       WMARK_HIGH
+#define ALLOC_NO_WATERMARKS    0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK       (ALLOC_NO_WATERMARKS-1)
+
 #define ALLOC_HARDER           0x10 /* try to alloc harder */
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
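
Because ALLOC_WMARK_MIN/LOW/HIGH now alias the WMARK_* enum values (0, 1 and 2) and ALLOC_NO_WATERMARKS is the next power of two, masking alloc_flags with ALLOC_WMARK_MASK produces an index straight into zone->watermark[]; that is what replaces the if/else chain in the zonelist scan below. A hypothetical userspace model of the bit layout:

#include <assert.h>

enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

#define ALLOC_WMARK_LOW		WMARK_LOW		/* 0x01 */
#define ALLOC_NO_WATERMARKS	0x04
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)	/* low two bits */
#define ALLOC_HARDER		0x10			/* unrelated bool flag */

int main(void)
{
	unsigned long watermark[NR_WMARK] = { 32, 48, 64 };	/* made-up values */
	int alloc_flags = ALLOC_WMARK_LOW | ALLOC_HARDER;

	/* Bits 0-1 select the watermark; higher bits stay boolean flags. */
	assert(watermark[alloc_flags & ALLOC_WMARK_MASK] == 48);
	return 0;
}
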
@@ -1440,14 +1448,10 @@ zonelist_scan:
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
 
+               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
-                       if (alloc_flags & ALLOC_WMARK_MIN)
-                               mark = zone->pages_min;
-                       else if (alloc_flags & ALLOC_WMARK_LOW)
-                               mark = zone->pages_low;
-                       else
-                               mark = zone->pages_high;
+                       mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                        if (!zone_watermark_ok(zone, order, mark,
                                    classzone_idx, alloc_flags)) {
                                if (!zone_reclaim_mode ||
@@ -1464,8 +1468,11 @@ this_zone_full:
                if (NUMA_BUILD)
                        zlc_mark_zone_full(zonelist, z);
 try_next_zone:
-               if (NUMA_BUILD && !did_zlc_setup) {
-                       /* we do zlc_setup after the first zone is tried */
+               if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+                       /*
+                        * We do zlc_setup after the first zone is tried, but only
+                        * if there are multiple nodes to make it worthwhile.
+                        */
                        allowednodes = zlc_setup(zonelist, alloc_flags);
                        zlc_active = 1;
                        did_zlc_setup = 1;
@@ -1956,7 +1963,7 @@ static unsigned int nr_free_zone_pages(int offset)
 
        for_each_zone_zonelist(zone, z, zonelist, offset) {
                unsigned long size = zone->present_pages;
-               unsigned long high = zone->pages_high;
+               unsigned long high = high_wmark_pages(zone);
                if (size > high)
                        sum += size - high;
        }
@@ -2093,9 +2100,9 @@ void show_free_areas(void)
                        "\n",
                        zone->name,
                        K(zone_page_state(zone, NR_FREE_PAGES)),
-                       K(zone->pages_min),
-                       K(zone->pages_low),
-                       K(zone->pages_high),
+                       K(min_wmark_pages(zone)),
+                       K(low_wmark_pages(zone)),
+                       K(high_wmark_pages(zone)),
                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
@@ -2260,7 +2267,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2469,7 +2476,7 @@ static void build_zonelists(pg_data_t *pgdat)
 
        /* NUMA-aware ordering of nodes */
        local_node = pgdat->node_id;
-       load = num_online_nodes();
+       load = nr_online_nodes;
        prev_node = local_node;
        nodes_clear(used_mask);
 
@@ -2620,7 +2627,7 @@ void build_all_zonelists(void)
 
        printk("Built %i zonelists in %s order, mobility grouping %s.  "
                "Total pages: %ld\n",
-                       num_online_nodes(),
+                       nr_online_nodes,
                        zonelist_order_name[current_zonelist_order],
                        page_group_by_mobility_disabled ? "off" : "on",
                        vm_total_pages);
@@ -2699,8 +2706,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 /*
  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
- * of blocks reserved is based on zone->pages_min. The memory within the
- * reserve will tend to store contiguous free pages. Setting min_free_kbytes
+ * of blocks reserved is based on min_wmark_pages(zone). The memory within
+ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  * higher will lead to a bigger reserve which will get freed as contiguous
  * blocks as reclaim kicks in
  */
@@ -2713,7 +2720,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        /* Get the start pfn, end pfn and the number of blocks to reserve */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
-       reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
+       reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
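
A worked example with made-up numbers: a 1000-page min watermark and 1024-page pageblocks (pageblock_order of 10) round up to exactly one reserved pageblock:

#include <stdio.h>

#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long min_wmark = 1000;		/* hypothetical min watermark, in pages */
	unsigned long block_pages = 1024;	/* hypothetical pageblock_nr_pages */

	/* Division by block_pages equals the >> pageblock_order above. */
	unsigned long reserve = roundup(min_wmark, block_pages) / block_pages;
	printf("reserve = %lu pageblock(s)\n", reserve);	/* prints 1 */
	return 0;
}
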
@@ -4316,8 +4323,8 @@ static void calculate_totalreserve_pages(void)
                                        max = zone->lowmem_reserve[j];
                        }
 
-                       /* we treat pages_high as reserved pages. */
-                       max += zone->pages_high;
+                       /* we treat the high watermark as reserved pages. */
+                       max += high_wmark_pages(zone);
 
                        if (max > zone->present_pages)
                                max = zone->present_pages;
@@ -4397,7 +4404,7 @@ void setup_per_zone_pages_min(void)
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
-                        * The (pages_high-pages_low) and (pages_low-pages_min)
+                        * The (WMARK_HIGH-WMARK_LOW) and (WMARK_LOW-WMARK_MIN)
                         * deltas control async page reclaim, and so should
                         * not be capped for highmem.
                         */
@@ -4408,17 +4415,17 @@ void setup_per_zone_pages_min(void)
                                min_pages = SWAP_CLUSTER_MAX;
                        if (min_pages > 128)
                                min_pages = 128;
-                       zone->pages_min = min_pages;
+                       zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->pages_min = tmp;
+                       zone->watermark[WMARK_MIN] = tmp;
                }
 
-               zone->pages_low   = zone->pages_min + (tmp >> 2);
-               zone->pages_high  = zone->pages_min + (tmp >> 1);
+               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
+               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
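
For a lowmem zone the ladder above works out to min = tmp, low = tmp + tmp/4 and high = tmp + tmp/2, where tmp is the zone's proportional share of min_free_kbytes. A toy recomputation with a made-up share:

#include <stdio.h>

enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

int main(void)
{
	unsigned long tmp = 1024;	/* hypothetical per-zone share, in pages */
	unsigned long watermark[NR_WMARK];

	watermark[WMARK_MIN]  = tmp;				/* lowmem-zone case */
	watermark[WMARK_LOW]  = watermark[WMARK_MIN] + (tmp >> 2);
	watermark[WMARK_HIGH] = watermark[WMARK_MIN] + (tmp >> 1);

	/* prints min=1024 low=1280 high=1536 */
	printf("min=%lu low=%lu high=%lu\n",
	       watermark[WMARK_MIN], watermark[WMARK_LOW], watermark[WMARK_HIGH]);
	return 0;
}
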
@@ -4563,7 +4570,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  *     whenever sysctl_lowmem_reserve_ratio changes.
  *
  * The reserve ratio obviously has absolutely no relation with the
- * pages_min watermarks. The lowmem reserve ratio can only make sense
+ * minimum watermarks. The lowmem reserve ratio can only make sense
  * as a function of the boot time zone sizes.
  */
 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,