Merge tag 'v4.4.77' into linux-linaro-lsk-v4.4
diff --git a/mm/compaction.c b/mm/compaction.c
index c5c627aae9962daf9c64d4f482c075e4cd96422a..dba02dec71952f5c414db14f28c8abc574624b83 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -35,17 +35,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #endif
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-#ifdef CONFIG_TRACEPOINTS
-static const char *const compaction_status_string[] = {
-       "deferred",
-       "skipped",
-       "continue",
-       "partial",
-       "complete",
-       "no_suitable_page",
-       "not_suitable_zone",
-};
-#endif
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
@@ -486,25 +475,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated)
+                       break;
+
                total_isolated += isolated;
+               cc->nr_freepages += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }
-
-               /* If a page was split, advance to the end of it */
-               if (isolated) {
-                       cc->nr_freepages += isolated;
-                       if (!strict &&
-                               cc->nr_migratepages <= cc->nr_freepages) {
-                               blockpfn += isolated;
-                               break;
-                       }
-
-                       blockpfn += isolated - 1;
-                       cursor += isolated - 1;
-                       continue;
+               if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+                       blockpfn += isolated;
+                       break;
                }
+               /* Advance to the end of split page */
+               blockpfn += isolated - 1;
+               cursor += isolated - 1;
+               continue;
 
 isolate_fail:
                if (strict)
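
Note: the rewrite above makes the free scanner abort as soon as split_free_page() fails, instead of falling through and scanning past a page it could not split; the accounting (total_isolated, cc->nr_freepages) now happens only for successful splits. A minimal userspace sketch of the new control flow follows; the struct, the stub split_free_page(), and all values are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's accounting; not the real types. */
struct compact_control { unsigned nr_freepages, nr_migratepages; };

/* Pretend the split fails for one specific page. */
static unsigned split_free_page(unsigned page) { return page == 3 ? 0 : 1; }

static unsigned isolate_block(struct compact_control *cc, bool strict)
{
	unsigned total = 0;

	for (unsigned page = 0; page < 8; page++) {
		unsigned isolated = split_free_page(page);

		if (!isolated)		/* split failed: abort the scan */
			break;
		total += isolated;
		cc->nr_freepages += isolated;
		/* stop early once enough free pages are isolated */
		if (!strict && cc->nr_migratepages <= cc->nr_freepages)
			break;
	}
	return total;
}

int main(void)
{
	struct compact_control cc = { 0, 5 };
	printf("isolated %u\n", isolate_block(&cc, false));
	return 0;
}
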
@@ -514,6 +501,9 @@ isolate_fail:
 
        }
 
+       if (locked)
+               spin_unlock_irqrestore(&cc->zone->lock, flags);
+
        /*
         * There is a tiny chance that we have read bogus compound_order(),
         * so be careful to not go outside of the pageblock.
@@ -535,9 +525,6 @@ isolate_fail:
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
-       if (locked)
-               spin_unlock_irqrestore(&cc->zone->lock, flags);
-
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
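
Note: the two hunks above shrink the locked region: spin_unlock_irqrestore() now sits directly after the scan loop, so the compound_order() sanity clamp and the strict-mode reset of total_isolated run without holding zone->lock. The same shape in portable C, with a pthread mutex standing in for the zone lock; names and structure here are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

static void scan_block(void)
{
	int locked = 0;

	/* the real scanner takes the lock lazily; here we take it once */
	pthread_mutex_lock(&zone_lock);
	locked = 1;
	/* ... walk the pageblock, isolating free pages ... */

	/* new placement: drop the lock the moment scanning is done */
	if (locked)
		pthread_mutex_unlock(&zone_lock);

	/* post-scan fixups no longer extend the critical section */
	printf("strict-mode checks run unlocked\n");
}

int main(void)
{
	scan_block();
	return 0;
}
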
@@ -891,16 +878,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                        ISOLATE_UNEVICTABLE);
 
-               /*
-                * In case of fatal failure, release everything that might
-                * have been isolated in the previous iteration, and signal
-                * the failure back to caller.
-                */
-               if (!pfn) {
-                       putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
+               if (!pfn)
                        break;
-               }
 
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
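
Note: after this hunk, isolate_migratepages_range() no longer puts back already-isolated pages on a fatal failure; it just breaks out and reports failure, leaving cleanup to its caller (the CMA path under alloc_contig_range()), which already performs the putback once on error. A sketch of that caller-owns-cleanup shape, with hypothetical helper names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's page-isolation calls. */
static bool isolate_range_chunk(int chunk) { return chunk < 2; } /* fail at 2 */
static void putback_movable_pages(void) { puts("putback once, in caller"); }

static bool isolate_range(void)
{
	for (int chunk = 0; chunk < 4; chunk++) {
		if (!isolate_range_chunk(chunk))
			return false;	/* just signal failure upward */
	}
	return true;
}

int main(void)
{
	/* the caller owns the cleanup, instead of each loop iteration */
	if (!isolate_range())
		putback_movable_pages();
	return 0;
}
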
@@ -985,7 +964,6 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
@@ -1009,32 +987,30 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolate_freepages_block(cc, &isolate_start_pfn,
-                                       block_end_pfn, freelist, false);
+               isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                       freelist, false);
 
                /*
-                * If we isolated enough freepages, or aborted due to async
-                * compaction being contended, terminate the loop.
-                * Remember where the free scanner should restart next time,
-                * which is where isolate_freepages_block() left off.
-                * But if it scanned the whole pageblock, isolate_start_pfn
-                * now points at block_end_pfn, which is the start of the next
-                * pageblock.
-                * In that case we will however want to restart at the start
-                * of the previous pageblock.
+                * If we isolated enough freepages, or aborted due to lock
+                * contention, terminate.
                 */
                if ((cc->nr_freepages >= cc->nr_migratepages)
                                                        || cc->contended) {
-                       if (isolate_start_pfn >= block_end_pfn)
+                       if (isolate_start_pfn >= block_end_pfn) {
+                               /*
+                                * Restart at previous pageblock if more
+                                * freepages can be isolated next time.
+                                */
                                isolate_start_pfn =
                                        block_start_pfn - pageblock_nr_pages;
+                       }
                        break;
-               } else {
+               } else if (isolate_start_pfn < block_end_pfn) {
                        /*
-                        * isolate_freepages_block() should not terminate
-                        * prematurely unless contended, or isolated enough
+                        * If isolation failed early, do not continue
+                        * needlessly.
                         */
-                       VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+                       break;
                }
        }
 
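
Note: the rewritten exit logic above does two things: it records where the free scanner should resume (stepping back one pageblock when isolate_freepages_block() consumed the whole block, since isolate_start_pfn then points at the next block), and it replaces the VM_BUG_ON with a plain break when isolation stopped early. The restart arithmetic in isolation, with illustrative pfn values and an assumed pageblock size:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* illustrative; arch-dependent */

/* Where should the free scanner restart next time? */
static unsigned long next_start(unsigned long isolate_start_pfn,
				unsigned long block_start_pfn,
				unsigned long block_end_pfn)
{
	/*
	 * Whole block scanned: isolate_start_pfn now points at the next
	 * pageblock, so step back and restart at the previous one.
	 */
	if (isolate_start_pfn >= block_end_pfn)
		return block_start_pfn - PAGEBLOCK_NR_PAGES;
	return isolate_start_pfn;	/* resume mid-block */
}

int main(void)
{
	/* block [1024, 1536): fully scanned vs. stopped at pfn 1200 */
	printf("%lu\n", next_start(1536, 1024, 1536));	/* prints 512 */
	printf("%lu\n", next_start(1200, 1024, 1536));	/* prints 1200 */
	return 0;
}
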
@@ -1197,6 +1173,15 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
 
+/*
+ * order == -1 is expected when compacting via
+ * /proc/sys/vm/compact_memory
+ */
+static inline bool is_via_compact_memory(int order)
+{
+       return order == -1;
+}
+
 static int __compact_finished(struct zone *zone, struct compact_control *cc,
                            const int migratetype)
 {
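
Note: is_via_compact_memory() gives a name to the long-standing convention that order == -1 marks a compaction run triggered by writing to /proc/sys/vm/compact_memory rather than by an allocation request; the hunks below replace three open-coded order == -1 checks with it. A trivial userspace mirror of the helper:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the kernel helper: -1 means "compact everything" was
 * requested via /proc/sys/vm/compact_memory, not a real allocation. */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

int main(void)
{
	printf("%d\n", is_via_compact_memory(-1));	/* 1: sysctl-triggered */
	printf("%d\n", is_via_compact_memory(3));	/* 0: order-3 request */
	return 0;
}
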
@@ -1204,7 +1189,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
        unsigned long watermark;
 
        if (cc->contended || fatal_signal_pending(current))
-               return COMPACT_PARTIAL;
+               return COMPACT_CONTENDED;
 
        /* Compaction run completes if the migrate and free scanner meet */
        if (compact_scanners_met(cc)) {
@@ -1223,11 +1208,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
                return COMPACT_COMPLETE;
        }
 
-       /*
-        * order == -1 is expected when compacting via
-        * /proc/sys/vm/compact_memory
-        */
-       if (cc->order == -1)
+       if (is_via_compact_memory(cc->order))
                return COMPACT_CONTINUE;
 
        /* Compaction run is not finished if the watermark is not met */
@@ -1290,11 +1271,7 @@ static unsigned long __compaction_suitable(struct zone *zone, int order,
        int fragindex;
        unsigned long watermark;
 
-       /*
-        * order == -1 is expected when compacting via
-        * /proc/sys/vm/compact_memory
-        */
-       if (order == -1)
+       if (is_via_compact_memory(order))
                return COMPACT_CONTINUE;
 
        watermark = low_wmark_pages(zone);
@@ -1403,7 +1380,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
-                       ret = COMPACT_PARTIAL;
+                       ret = COMPACT_CONTENDED;
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
@@ -1434,7 +1411,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                         * and we want compact_finished() to detect it
                         */
                        if (err == -ENOMEM && !compact_scanners_met(cc)) {
-                               ret = COMPACT_PARTIAL;
+                               ret = COMPACT_CONTENDED;
                                goto out;
                        }
                }
@@ -1487,6 +1464,9 @@ out:
        trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
                                cc->free_pfn, end_pfn, sync, ret);
 
+       if (ret == COMPACT_CONTENDED)
+               ret = COMPACT_PARTIAL;
+
        return ret;
 }
 
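
Note: the hunks above switch the internal abort paths from COMPACT_PARTIAL to COMPACT_CONTENDED so the mm_compaction_end tracepoint can tell "aborted because of contention or a fatal signal" apart from ordinary partial progress; the new value is then folded back to COMPACT_PARTIAL right before compact_zone() returns, so callers see no behavioral change. A compressed model of that flow, with the enum abridged to what the sketch needs:

#include <stdio.h>

/* Abridged from the kernel's compact_result; illustration only. */
enum compact_result { COMPACT_PARTIAL, COMPACT_COMPLETE, COMPACT_CONTENDED };

static enum compact_result compact_zone_sketch(int contended)
{
	enum compact_result ret = contended ? COMPACT_CONTENDED
					    : COMPACT_COMPLETE;

	/* the tracepoint sees the precise reason... */
	printf("trace: ret=%d\n", ret);

	/* ...but callers still get the value they have always handled */
	if (ret == COMPACT_CONTENDED)
		ret = COMPACT_PARTIAL;
	return ret;
}

int main(void)
{
	printf("caller sees %d\n", compact_zone_sketch(1));
	return 0;
}
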
@@ -1658,10 +1638,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                 * this makes sure we compact the whole zone regardless of
                 * cached scanner positions.
                 */
-               if (cc->order == -1)
+               if (is_via_compact_memory(cc->order))
                        __reset_isolation_suitable(zone);
 
-               if (cc->order == -1 || !compaction_deferred(zone, cc->order))
+               if (is_via_compact_memory(cc->order) ||
+                               !compaction_deferred(zone, cc->order))
                        compact_zone(zone, cc);
 
                if (cc->order > 0) {