mm/hugetlb: create hugetlb cgroup file in hugetlb_init
[firefly-linux-kernel-4.4.55.git] mm/page_alloc.c
index 5a8d339d282a88cd0c5687596eca8023f93846ba..2ad2ad168efe930bb75391848e71f4ac8c61099b 100644
@@ -89,6 +89,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
 #ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
+#endif
+#ifdef CONFIG_MOVABLE_NODE
+       [N_MEMORY] = { { [0] = 1UL } },
 #endif
        [N_CPU] = { { [0] = 1UL } },
 #endif /* NUMA */
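
This hunk seeds N_MEMORY for node 0 at boot so that, with CONFIG_MOVABLE_NODE,
code can iterate over nodes that have memory of any kind (regular, high or
movable). A minimal sketch of such a walk, using the standard nodemask
helpers (the function name is hypothetical):

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: visit every node that has memory of any kind. */
    static void walk_memory_nodes(void)
    {
            int nid;

            for_each_node_state(nid, N_MEMORY)
                    pr_info("node %d has memory\n", nid);
    }
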
@@ -368,8 +371,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
        int nr_pages = 1 << order;
        int bad = 0;
 
-       if (unlikely(compound_order(page) != order) ||
-           unlikely(!PageHead(page))) {
+       if (unlikely(compound_order(page) != order)) {
                bad_page(page);
                bad++;
        }
@@ -523,7 +525,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * If a block is freed, and its buddy is also free, then this
  * triggers coalescing into a block of larger size.
  *
- * -- wli
+ * -- nyc
  */
 
 static inline void __free_one_page(struct page *page,
@@ -608,6 +610,7 @@ static inline int free_pages_check(struct page *page)
                bad_page(page);
                return 1;
        }
+       reset_page_last_nid(page);
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
@@ -732,6 +735,13 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        local_irq_restore(flags);
 }
 
+/*
+ * Read access to zone->managed_pages is safe because it's an unsigned long,
+ * but writers still need to be serialized. Currently all callers of
+ * __free_pages_bootmem() except put_page_bootmem() run only at boot time,
+ * so to keep boot time short we shift the serialization burden to
+ * put_page_bootmem().
+ */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
        unsigned int nr_pages = 1 << order;
@@ -747,6 +757,7 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
                set_page_count(p, 0);
        }
 
+       page_zone(page)->managed_pages += 1 << order;
        set_page_refcounted(page);
        __free_pages(page, order);
 }
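
The comment above splits the serialization burden: boot-time callers of
__free_pages_bootmem() run single-threaded and can bump managed_pages with a
plain add, while the one post-boot caller must serialize itself. A sketch of
that division of labour (the locking shown is illustrative, not necessarily
what put_page_bootmem() does):

    /* Boot time: only one CPU is running, so a plain add is safe. */
    static void __init account_boot_pages(struct zone *zone, unsigned int order)
    {
            zone->managed_pages += 1UL << order;
    }

    /* After boot: writers can race, so serialize, e.g. on zone->lock. */
    static void account_hotplug_pages(struct zone *zone, unsigned int order)
    {
            unsigned long flags;

            spin_lock_irqsave(&zone->lock, flags);
            zone->managed_pages += 1UL << order;
            spin_unlock_irqrestore(&zone->lock, flags);
    }
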
@@ -782,7 +793,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
  * large block of memory acted on by a series of small allocations.
  * This behavior is a critical factor in sglist merging's success.
  *
- * -- wli
+ * -- nyc
  */
 static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
@@ -1695,7 +1706,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
- * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
+ * task's mems_allowed, or node_states[N_MEMORY].)
  *
  * If the zonelist cache is not available for this zonelist, does
  * nothing and returns NULL.
@@ -1724,7 +1735,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 
        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
                                        &cpuset_current_mems_allowed :
-                                       &node_states[N_HIGH_MEMORY];
+                                       &node_states[N_MEMORY];
        return allowednodes;
 }
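
For context, a hypothetical consumer of zlc_setup(), showing how the returned
mask is meant to be used; in the real allocator this logic lives inside
get_page_from_freelist():

    /* Sketch: is @nid eligible under the mask zlc_setup() handed back? */
    static bool nid_allowed(int nid, struct zonelist *zonelist, int alloc_flags)
    {
            nodemask_t *allowed = zlc_setup(zonelist, alloc_flags);

            if (!allowed)   /* no zonelist cache: consider every node */
                    return true;
            return node_isset(nid, *allowed);
    }
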
 
@@ -2601,6 +2612,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        int migratetype = allocflags_to_migratetype(gfp_mask);
        unsigned int cpuset_mems_cookie;
        int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
+       struct mem_cgroup *memcg = NULL;
 
        gfp_mask &= gfp_allowed_mask;
 
@@ -2619,6 +2631,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
+       /*
+        * This only has an effect when __GFP_KMEMCG is set, which is
+        * verified in the (always inlined) callee.
+        */
+       if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
+               return NULL;
+
 retry_cpuset:
        cpuset_mems_cookie = get_mems_allowed();
 
@@ -2654,6 +2673,8 @@ out:
        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
                goto retry_cpuset;
 
+       memcg_kmem_commit_charge(page, memcg, order);
+
        return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
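
The two calls added above form a reserve/commit pair: the charge is taken
against the memcg before the page exists and is only bound to the page (or,
on allocation failure, released) afterwards. A condensed sketch of the
protocol, using the helpers this series introduces (actually_allocate() is a
stand-in for the allocator's fast and slow paths):

    struct page *charged_alloc(gfp_t gfp_mask, unsigned int order)
    {
            struct mem_cgroup *memcg = NULL;
            struct page *page;

            /* Reserve: a no-op unless __GFP_KMEMCG is set; fails over limit. */
            if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
                    return NULL;

            page = actually_allocate(gfp_mask, order);      /* hypothetical */

            /* Commit: bind the charge to @page, or uncharge if @page is NULL. */
            memcg_kmem_commit_charge(page, memcg, order);
            return page;
    }
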
@@ -2706,6 +2727,31 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/*
+ * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
+ * pages allocated with __GFP_KMEMCG.
+ *
+ * Those pages are accounted to a particular memcg, embedded in the
+ * corresponding page_cgroup. Rather than burden every allocator user with a
+ * lookup that, for most callers, would only find NULL, we provide these
+ * dedicated functions for the few that care.
+ *
+ * The caller knows better which flags it relies on.
+ */
+void __free_memcg_kmem_pages(struct page *page, unsigned int order)
+{
+       memcg_kmem_uncharge_pages(page, order);
+       __free_pages(page, order);
+}
+
+void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
+{
+       if (addr != 0) {
+               VM_BUG_ON(!virt_addr_valid((void *)addr));
+               __free_memcg_kmem_pages(virt_to_page((void *)addr), order);
+       }
+}
+
 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
 {
        if (addr) {
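
A hypothetical round trip with these helpers; the point is that anything
allocated with __GFP_KMEMCG must be freed through one of the
*_memcg_kmem_pages() variants so the charge is dropped:

    static unsigned long buf;

    static int sample_init(void)
    {
            /* Charged to the current task's memcg because of __GFP_KMEMCG. */
            buf = __get_free_pages(GFP_KERNEL | __GFP_KMEMCG, 2);
            return buf ? 0 : -ENOMEM;
    }

    static void sample_exit(void)
    {
            /* Pairs with the charged allocation above; order must match. */
            free_memcg_kmem_pages(buf, 2);
    }
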
@@ -2981,6 +3027,7 @@ void show_free_areas(unsigned int filter)
                        " isolated(anon):%lukB"
                        " isolated(file):%lukB"
                        " present:%lukB"
+                       " managed:%lukB"
                        " mlocked:%lukB"
                        " dirty:%lukB"
                        " writeback:%lukB"
@@ -3010,6 +3057,7 @@ void show_free_areas(unsigned int filter)
                        K(zone_page_state(zone, NR_ISOLATED_ANON)),
                        K(zone_page_state(zone, NR_ISOLATED_FILE)),
                        K(zone->present_pages),
+                       K(zone->managed_pages),
                        K(zone_page_state(zone, NR_MLOCK)),
                        K(zone_page_state(zone, NR_FILE_DIRTY)),
                        K(zone_page_state(zone, NR_WRITEBACK)),
@@ -3238,7 +3286,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
                return node;
        }
 
-       for_each_node_state(n, N_HIGH_MEMORY) {
+       for_each_node_state(n, N_MEMORY) {
 
                /* Don't want a node to appear more than once */
                if (node_isset(n, *used_node_mask))
@@ -3380,7 +3428,7 @@ static int default_zonelist_order(void)
         * local memory, NODE_ORDER may be suitable.
          */
        average_size = total_size /
-                               (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
+                               (nodes_weight(node_states[N_MEMORY]) + 1);
        for_each_online_node(nid) {
                low_kmem_size = 0;
                total_size = 0;
@@ -3870,6 +3918,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                mminit_verify_page_links(page, zone, nid, pfn);
                init_page_count(page);
                reset_page_mapcount(page);
+               reset_page_last_nid(page);
                SetPageReserved(page);
                /*
                 * Mark the block movable so that blocks are reserved for
@@ -4476,6 +4525,26 @@ void __init set_pageblock_order(void)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
+static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
+                                                  unsigned long present_pages)
+{
+       unsigned long pages = spanned_pages;
+
+       /*
+        * Provide a more accurate estimate if there are holes within
+        * the zone and SPARSEMEM is in use. With holes, each populated
+        * memory region may cost us one or two extra memmap pages due to
+        * alignment, because the memmap for a populated region may not be
+        * naturally aligned on a page boundary.
+        * The (present_pages >> 4) heuristic is a tradeoff for that.
+        */
+       if (spanned_pages > present_pages + (present_pages >> 4) &&
+           IS_ENABLED(CONFIG_SPARSEMEM))
+               pages = present_pages;
+
+       return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
+}
+
 /*
  * Set up the zone data structures:
  *   - mark all pages reserved
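
A worked example of calc_memmap_size(), assuming 4 KiB pages and
sizeof(struct page) == 64 (both are configuration-dependent):

    /*
     * spanned_pages = 1,048,576  (a 4 GiB span)
     * present_pages =   786,432  (1 GiB of holes)
     *
     * spanned > present + (present >> 4) = 835,584 and SPARSEMEM is on,
     * so the estimate is based on present_pages:
     *
     *   PAGE_ALIGN(786,432 * 64) >> PAGE_SHIFT = 12,288 memmap pages (48 MiB)
     */
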
@@ -4493,54 +4562,67 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        int ret;
 
        pgdat_resize_init(pgdat);
+#ifdef CONFIG_NUMA_BALANCING
+       spin_lock_init(&pgdat->numabalancing_migrate_lock);
+       pgdat->numabalancing_migrate_nr_pages = 0;
+       pgdat->numabalancing_migrate_next_window = jiffies;
+#endif
        init_waitqueue_head(&pgdat->kswapd_wait);
        init_waitqueue_head(&pgdat->pfmemalloc_wait);
        pgdat_page_cgroup_init(pgdat);
 
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
-               unsigned long size, realsize, memmap_pages;
+               unsigned long size, realsize, freesize, memmap_pages;
 
                size = zone_spanned_pages_in_node(nid, j, zones_size);
-               realsize = size - zone_absent_pages_in_node(nid, j,
+               realsize = freesize = size - zone_absent_pages_in_node(nid, j,
                                                                zholes_size);
 
                /*
-                * Adjust realsize so that it accounts for how much memory
+                * Adjust freesize so that it accounts for how much memory
                 * is used by this zone for memmap. This affects the watermark
                 * and per-cpu initialisations
                 */
-               memmap_pages =
-                       PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
-               if (realsize >= memmap_pages) {
-                       realsize -= memmap_pages;
+               memmap_pages = calc_memmap_size(size, realsize);
+               if (freesize >= memmap_pages) {
+                       freesize -= memmap_pages;
                        if (memmap_pages)
                                printk(KERN_DEBUG
                                       "  %s zone: %lu pages used for memmap\n",
                                       zone_names[j], memmap_pages);
                } else
                        printk(KERN_WARNING
-                               "  %s zone: %lu pages exceeds realsize %lu\n",
-                               zone_names[j], memmap_pages, realsize);
+                               "  %s zone: %lu pages exceeds freesize %lu\n",
+                               zone_names[j], memmap_pages, freesize);
 
                /* Account for reserved pages */
-               if (j == 0 && realsize > dma_reserve) {
-                       realsize -= dma_reserve;
+               if (j == 0 && freesize > dma_reserve) {
+                       freesize -= dma_reserve;
                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
                                        zone_names[0], dma_reserve);
                }
 
                if (!is_highmem_idx(j))
-                       nr_kernel_pages += realsize;
-               nr_all_pages += realsize;
+                       nr_kernel_pages += freesize;
+               /* Charge for highmem memmap if there are enough kernel pages */
+               else if (nr_kernel_pages > memmap_pages * 2)
+                       nr_kernel_pages -= memmap_pages;
+               nr_all_pages += freesize;
 
                zone->spanned_pages = size;
-               zone->present_pages = realsize;
+               zone->present_pages = freesize;
+               /*
+                * Set an approximate value for lowmem here; it will be adjusted
+                * when the bootmem allocator frees pages into the buddy system,
+                * while all highmem pages are managed by the buddy system directly.
+                */
+               zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
                zone->node = nid;
-               zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
+               zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
                                                / 100;
-               zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
+               zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
 #endif
                zone->name = zone_names[j];
                spin_lock_init(&zone->lock);
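
The net effect of the renames above is to keep three distinct per-zone
counters. Roughly (a sketch of the intended invariant, not literal code):

    /*
     * spanned_pages : pages the zone covers, holes included
     * present_pages : spanned_pages minus holes
     * managed_pages : present_pages minus memmap and bootmem reservations,
     *                 i.e. the pages the buddy allocator actually manages
     *
     * hence: managed_pages <= present_pages <= spanned_pages
     */
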
@@ -4731,7 +4813,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
- * Populate N_HIGH_MEMORY for calculating usable_nodes.
+ * Populate N_MEMORY for calculating usable_nodes.
  */
 static unsigned long __init early_calculate_totalpages(void)
 {
@@ -4744,7 +4826,7 @@ static unsigned long __init early_calculate_totalpages(void)
 
                totalpages += pages;
                if (pages)
-                       node_set_state(nid, N_HIGH_MEMORY);
+                       node_set_state(nid, N_MEMORY);
        }
        return totalpages;
 }
@@ -4761,9 +4843,9 @@ static void __init find_zone_movable_pfns_for_nodes(void)
        unsigned long usable_startpfn;
        unsigned long kernelcore_node, kernelcore_remaining;
        /* save the state before borrow the nodemask */
-       nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
+       nodemask_t saved_node_state = node_states[N_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
-       int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+       int usable_nodes = nodes_weight(node_states[N_MEMORY]);
 
        /*
         * If movablecore was specified, calculate what size of
@@ -4798,7 +4880,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 restart:
        /* Spread kernelcore memory as evenly as possible throughout nodes */
        kernelcore_node = required_kernelcore / usable_nodes;
-       for_each_node_state(nid, N_HIGH_MEMORY) {
+       for_each_node_state(nid, N_MEMORY) {
                unsigned long start_pfn, end_pfn;
 
                /*
@@ -4890,23 +4972,27 @@ restart:
 
 out:
        /* restore the node_state */
-       node_states[N_HIGH_MEMORY] = saved_node_state;
+       node_states[N_MEMORY] = saved_node_state;
 }
 
-/* Any regular memory on that node ? */
-static void __init check_for_regular_memory(pg_data_t *pgdat)
+/* Any regular or high memory on that node? */
+static void check_for_memory(pg_data_t *pgdat, int nid)
 {
-#ifdef CONFIG_HIGHMEM
        enum zone_type zone_type;
 
-       for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
+       if (N_MEMORY == N_NORMAL_MEMORY)
+               return;
+
+       for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
                struct zone *zone = &pgdat->node_zones[zone_type];
                if (zone->present_pages) {
-                       node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+                       node_set_state(nid, N_HIGH_MEMORY);
+                       if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
+                           zone_type <= ZONE_NORMAL)
+                               node_set_state(nid, N_NORMAL_MEMORY);
                        break;
                }
        }
-#endif
 }
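
check_for_memory() relies on a strict containment between the node states
(a hedged summary of this series' definitions):

    /*
     * N_NORMAL_MEMORY : node has memory at or below ZONE_NORMAL
     * N_HIGH_MEMORY   : node has regular or high memory
     * N_MEMORY        : node has regular, high or movable memory
     *
     * N_NORMAL_MEMORY <= N_HIGH_MEMORY <= N_MEMORY as sets. Without
     * CONFIG_HIGHMEM (and CONFIG_MOVABLE_NODE) the states alias one
     * another, which is why the function bails out early when
     * N_MEMORY == N_NORMAL_MEMORY.
     */
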
 
 /**
@@ -4989,8 +5075,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
                /* Any memory on that node */
                if (pgdat->node_present_pages)
-                       node_set_state(nid, N_HIGH_MEMORY);
-               check_for_regular_memory(pgdat);
+                       node_set_state(nid, N_MEMORY);
+               check_for_memory(pgdat, nid);
        }
 }
 
@@ -5727,7 +5813,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
        unsigned int tries = 0;
        int ret = 0;
 
-       migrate_prep_local();
+       migrate_prep();
 
        while (pfn < end || !list_empty(&cc->migratepages)) {
                if (fatal_signal_pending(current)) {
@@ -5755,7 +5841,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 
                ret = migrate_pages(&cc->migratepages,
                                    alloc_migrate_target,
-                                   0, false, MIGRATE_SYNC);
+                                   0, false, MIGRATE_SYNC,
+                                   MR_CMA);
        }
 
        putback_movable_pages(&cc->migratepages);
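
The extra argument to migrate_pages() is a migrate_reason, consumed by the
migration tracepoints so that CMA-triggered migrations can be told apart from
compaction, NUMA balancing and the rest. The enum as introduced around this
series (a sketch; include/linux/migrate.h is authoritative):

    enum migrate_reason {
            MR_COMPACTION,
            MR_MEMORY_FAILURE,
            MR_MEMORY_HOTPLUG,
            MR_SYSCALL,             /* also applies to cpusets */
            MR_MEMPOLICY_MBIND,
            MR_NUMA_MISPLACED,
            MR_CMA
    };
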