mm/sparse: use memblock apis for early memory allocations and drop redundant nr_pages parameters
[firefly-linux-kernel-4.4.55.git] / mm / sparse.c
index 4ac1d7ef548f8ce75b65ae64057c300c8b0cc48c..63c3ea5c119c41f62d28b05e99ba4d7882241766 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -69,7 +69,7 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
-               section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+               section = memblock_virt_alloc_node(array_size, nid);
        }
 
        return section;
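
The replacement comes from the set of memblock wrappers added to include/linux/bootmem.h as part of this conversion series. A hedged sketch of its shape, recalled from that header (BOOTMEM_LOW_LIMIT resolves to __pa(MAX_DMA_ADDRESS) on CONFIG_NO_BOOTMEM configurations, 0 otherwise):

	static inline void * __init memblock_virt_alloc_node(
						phys_addr_t size, int nid)
	{
		/* align == 0 means "use the default alignment"; the
		 * allocation prefers node nid but may fall back */
		return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
						   BOOTMEM_ALLOC_ACCESSIBLE,
						   nid);
	}

Unlike alloc_bootmem_node(), the caller passes a plain node id rather than a NODE_DATA() pointer, which is what the hunk above reflects.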
@@ -279,8 +279,9 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-       p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
-                                         SMP_CACHE_BYTES, goal, limit);
+       p = memblock_virt_alloc_try_nid_nopanic(size,
+                                               SMP_CACHE_BYTES, goal, limit,
+                                               nid);
        if (!p && limit) {
                limit = 0;
                goto again;
@@ -331,7 +332,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       return alloc_bootmem_node_nopanic(pgdat, size);
+       return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -376,8 +377,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
                return map;
 
        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
-       map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
-                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       map = memblock_virt_alloc_try_nid(size,
+                                         PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+                                         BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
 }
 void __init sparse_mem_maps_populate_node(struct page **map_map,
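
BOOTMEM_ALLOC_ACCESSIBLE rides in the max_addr slot of memblock_virt_alloc_try_nid(). The constants, as recalled from include/linux/bootmem.h (hedged, the exact definitions may differ slightly):

	#define BOOTMEM_ALLOC_ACCESSIBLE	0
	#define BOOTMEM_ALLOC_ANYWHERE		(~(phys_addr_t)0)

That is, "accessible" is simply a zero upper bound, telling memblock to stay below its current accessibility limit, matching what __alloc_bootmem_node_high() provided before.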
@@ -401,8 +403,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
        }
 
        size = PAGE_ALIGN(size);
-       map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
-                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       map = memblock_virt_alloc_try_nid(size * map_count,
+                                         PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+                                         BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
@@ -545,7 +548,7 @@ void __init sparse_init(void)
         * sparse_early_mem_map_alloc, so allocate usemap_map at first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
-       usemap_map = alloc_bootmem(size);
+       usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
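
Passing 0 for the align argument here is deliberate: the memblock allocator substitutes a default when no alignment is given, so behaviour matches the old alloc_bootmem(), which aligned to SMP_CACHE_BYTES. A hedged sketch of that fallback inside mm/memblock.c:

	/* in memblock_virt_alloc_internal() (sketch): */
	if (!align)
		align = SMP_CACHE_BYTES;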
@@ -553,7 +556,7 @@ void __init sparse_init(void)
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
-       map_map = alloc_bootmem(size2);
+       map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
@@ -583,40 +586,39 @@ void __init sparse_init(void)
        vmemmap_populate_print_last();
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-       free_bootmem(__pa(map_map), size2);
+       memblock_free_early(__pa(map_map), size2);
 #endif
-       free_bootmem(__pa(usemap_map), size);
+       memblock_free_early(__pa(usemap_map), size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-                                                unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
 }
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
        unsigned long start = (unsigned long)memmap;
-       unsigned long end = (unsigned long)(memmap + nr_pages);
+       unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
        vmemmap_free(start, end);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
        unsigned long start = (unsigned long)memmap;
-       unsigned long end = (unsigned long)(memmap + nr_pages);
+       unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
        vmemmap_free(start, end);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+static struct page *__kmalloc_section_memmap(void)
 {
        struct page *page, *ret;
-       unsigned long memmap_size = sizeof(struct page) * nr_pages;
+       unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
 
        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
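
Dropping the nr_pages parameter throughout this block is safe because memory hotplug adds and removes exactly one section at a time, so every caller passed PAGES_PER_SECTION anyway. For a sense of scale, typical x86_64 numbers (hedged; the geometry is arch- and config-dependent):

	/* SECTION_SIZE_BITS = 27             -> 128 MiB sections
	 * PAGES_PER_SECTION = 1 << (27 - 12) = 32768 4-KiB pages
	 * memmap per section = 32768 * sizeof(struct page)
	 *                    = 32768 * 64 bytes = 2 MiB
	 */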
@@ -634,28 +636,30 @@ got_map_ptr:
        return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-                                                 unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
-       return __kmalloc_section_memmap(nr_pages);
+       return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
-                          get_order(sizeof(struct page) * nr_pages));
+                          get_order(sizeof(struct page) * PAGES_PER_SECTION));
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
        unsigned long maps_section_nr, removing_section_nr, i;
-       unsigned long magic;
+       unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);
 
+       nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+               >> PAGE_SHIFT;
+
        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;
 
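
With the parameter gone, free_map_bootmem() derives the count itself. Note it is not PAGES_PER_SECTION but the number of physical pages backing the section's memmap. Continuing the worked x86_64 numbers from above (hedged):

	/* nr_pages = PAGE_ALIGN(32768 * 64) >> PAGE_SHIFT
	 *          = 2 MiB >> 12 = 512
	 * so 512 backing pages are walked to release their
	 * bootmem references.
	 */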
@@ -684,8 +688,7 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-                          int nr_pages)
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
 {
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
@@ -702,12 +705,12 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
-       memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+       memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
-               __kfree_section_memmap(memmap, nr_pages);
+               __kfree_section_memmap(memmap);
                return -ENOMEM;
        }
 
@@ -719,7 +722,7 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                goto out;
        }
 
-       memset(memmap, 0, sizeof(struct page) * nr_pages);
+       memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
 
        ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
@@ -729,7 +732,7 @@ out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
-               __kfree_section_memmap(memmap, nr_pages);
+               __kfree_section_memmap(memmap);
        }
        return ret;
 }
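
For context, an illustrative sketch of how the hotplug path consumes the new two-argument form; this mirrors the __add_section()/__add_pages() callers in mm/memory_hotplug.c of this era but is hedged, not verbatim:

	ret = sparse_add_one_section(zone, phys_start_pfn);
	if (ret < 0)
		return ret;
	/* ret == -EEXIST (section already present) is tolerated
	 * further up the call chain rather than treated as fatal. */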
@@ -759,7 +762,6 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
        struct page *usemap_page;
-       unsigned long nr_pages;
 
        if (!usemap)
                return;
@@ -771,7 +773,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
-                       __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+                       __kfree_section_memmap(memmap);
                return;
        }
 
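
The PageSlab()/PageCompound() test above distinguishes the two possible origins of the usemap. A hedged restatement of that reasoning in comment form:

	/* usemap from kmalloc()          -> backing page is PageSlab
	 *   (or PageCompound for large kmalloc) -> kfree() above;
	 * usemap from boot-time memblock -> plain reserved page ->
	 *   fall through to the free_map_bootmem() handling below.
	 */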
@@ -780,12 +782,8 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
         * on the section which has pgdat at boot time. Just keep it as is now.
         */
 
-       if (memmap) {
-               nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
-                       >> PAGE_SHIFT;
-
-               free_map_bootmem(memmap, nr_pages);
-       }
+       if (memmap)
+               free_map_bootmem(memmap);
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)