X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=mm%2Fpage_alloc.c;h=bf6c9285013fb0372693ef82f75cdd50007b2620;hb=77e142e21dfd6507f7d1f0464c12f9e5502e1f10;hp=0ab02fb8e9b1d6093ab12ee521e7dfb2f37576dc;hpb=a44e5e7cf580307809147cbec3d326e77524f757;p=firefly-linux-kernel-4.4.55.git

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ab02fb8e9b1..bf6c9285013f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -196,7 +196,20 @@ static char * const zone_names[MAX_NR_ZONES] = {
 	 "Movable",
 };
 
+/*
+ * Try to keep at least this much lowmem free. Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
+
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes = 0;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1037,6 +1050,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 
 			page = list_entry(area->free_list[migratetype].next,
 					struct page, lru);
+
+#ifdef CONFIG_ARCH_ROCKCHIP
+			if (is_migrate_cma(migratetype)) {
+				int mt = get_pageblock_migratetype(page);
+				if (unlikely(is_migrate_isolate(mt)))
+					continue;
+			}
+#endif
+
 			area->nr_free--;
 
 			/*
@@ -1650,7 +1672,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		free_pages -= z->free_area[o].nr_free << o;
 
 		/* Require fewer higher order pages to be free */
-		min >>= 1;
+		min >>= min_free_order_shift;
 
 		if (free_pages <= min)
 			return false;
@@ -4529,7 +4551,7 @@ static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
 {
 	unsigned int order;
 
@@ -4557,7 +4579,7 @@ void __init set_pageblock_order(void)
  * include/linux/pageblock-flags.h for the values of pageblock_order based on
  * the kernel config
  */
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
 {
 }
 
@@ -5322,6 +5344,7 @@ static void setup_per_zone_lowmem_reserve(void)
 static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
 	unsigned long lowmem_pages = 0;
 	struct zone *zone;
 	unsigned long flags;
@@ -5333,11 +5356,14 @@ static void __setup_per_zone_wmarks(void)
 	}
 
 	for_each_zone(zone) {
-		u64 tmp;
+		u64 min, low;
 
 		spin_lock_irqsave(&zone->lock, flags);
-		tmp = (u64)pages_min * zone->managed_pages;
-		do_div(tmp, lowmem_pages);
+		min = (u64)pages_min * zone->managed_pages;
+		do_div(min, lowmem_pages);
+		low = (u64)pages_low * zone->managed_pages;
+		do_div(low, vm_total_pages);
+
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -5358,11 +5384,13 @@ static void __setup_per_zone_wmarks(void)
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->watermark[WMARK_MIN] = tmp;
+			zone->watermark[WMARK_MIN] = min;
 		}
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+					low + (min >> 2);
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+					low + (min >> 1);
 
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
@@ -5475,7 +5503,7 @@ module_init(init_per_zone_wmark_min)
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *	that we can call two helper functions whenever min_free_kbytes
- *	changes.
+ *	or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
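
Note: the net effect of the __setup_per_zone_wmarks() hunks above can be checked with a small user-space sketch. This is illustration only, not kernel code: the zone size and the extra_free_kbytes value below are made up, and plain 64-bit division stands in for the kernel's do_div(). It shows that extra_free_kbytes lifts WMARK_LOW and WMARK_HIGH while leaving WMARK_MIN untouched.

/*
 * Sketch of the per-zone watermark arithmetic from the patch above.
 * Hypothetical setup: one lowmem zone that covers all of memory, so
 * lowmem_pages == vm_total_pages == managed_pages.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

int main(void)
{
	unsigned long min_free_kbytes = 1024;	/* default from the patch */
	unsigned long extra_free_kbytes = 4096;	/* hypothetical tuning */

	/* kbytes -> pages, exactly as in __setup_per_zone_wmarks() */
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);

	unsigned long long managed_pages = 131072;	/* 512 MiB zone */
	unsigned long long lowmem_pages = managed_pages;
	unsigned long long vm_total_pages = managed_pages;

	/* min scales with lowmem share, low with total-memory share */
	unsigned long long min = pages_min * managed_pages / lowmem_pages;
	unsigned long long low = pages_low * managed_pages / vm_total_pages;

	printf("WMARK_MIN  = %llu pages\n", min);			/* 256 */
	printf("WMARK_LOW  = %llu pages\n", min + low + (min >> 2));	/* 1344 */
	printf("WMARK_HIGH = %llu pages\n", min + low + (min >> 1));	/* 1408 */
	return 0;
}

With extra_free_kbytes left at its default of 0, low is 0 and the LOW/HIGH watermarks fall back to the stock min + min/4 and min + min/2, so the patch is behavior-neutral until the knob is tuned.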