diff --git a/mm/compaction.c b/mm/compaction.c
index 0dce7e87d771b3d63f05ce054188a07ed4e6fa48..c5c627aae9962daf9c64d4f482c075e4cd96422a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -437,6 +437,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                if (!valid_page)
                        valid_page = page;
+
+               /*
+                * For compound pages such as THP and hugetlbfs, we can
+                * potentially save a lot of iterations if we skip them at once.
+                * The check is racy, but we can consider only valid values
+                * and the only danger is skipping too much.
+                */
+               if (PageCompound(page)) {
+                       unsigned int comp_order = compound_order(page);
+
+                       if (likely(comp_order < MAX_ORDER)) {
+                               blockpfn += (1UL << comp_order) - 1;
+                               cursor += (1UL << comp_order) - 1;
+                       }
+
+                       goto isolate_fail;
+               }
+
                if (!PageBuddy(page))
                        goto isolate_fail;
 
@@ -496,6 +514,13 @@ isolate_fail:
 
        }
 
+       /*
+        * There is a tiny chance that we have read bogus compound_order(),
+        * so be careful to not go outside of the pageblock.
+        */
+       if (unlikely(blockpfn > end_pfn))
+               blockpfn = end_pfn;
+
        trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
                                        nr_scanned, total_isolated);
 
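A side note on the arithmetic in the two hunks above: the loop's own blockpfn++/cursor++ supplies the +1 that the "- 1" leaves out, and because the order is read without any lock a valid-looking but stale value can still step past the pageblock, which is what the later clamp catches. A minimal stand-alone sketch of just that skip-and-clamp logic (plain C, not kernel code; the MAX_ORDER value, pfn values and function name are made-up stand-ins):

#include <stdio.h>

#define MAX_ORDER 11	/* stand-in for the kernel's default MAX_ORDER */

/*
 * Walk one pageblock step: a compound page of order 'comp_order' found at
 * 'blockpfn' is skipped in a single jump, and a racy-but-valid order read
 * is rendered harmless by clamping to the pageblock end.
 */
static unsigned long skip_compound(unsigned long blockpfn,
				   unsigned long end_pfn,
				   unsigned int comp_order)
{
	/* Only trust orders that are possible at all; others skip nothing. */
	if (comp_order < MAX_ORDER)
		blockpfn += (1UL << comp_order) - 1;

	/* The loop's own blockpfn++ accounts for the page we stand on. */
	blockpfn++;

	/* A stale order may still overshoot; stay inside the pageblock. */
	if (blockpfn > end_pfn)
		blockpfn = end_pfn;

	return blockpfn;
}

int main(void)
{
	unsigned long end_pfn = 512;	/* hypothetical pageblock boundary */

	/* THP of order 9 starting at pfn 0: jump straight to pfn 512. */
	printf("after order-9 skip: %lu\n", skip_compound(0, end_pfn, 9));

	/* Racy order-9 read near the end of the block gets clamped. */
	printf("after clamped skip: %lu\n", skip_compound(500, end_pfn, 9));
	return 0;
}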
@@ -680,6 +705,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
+               bool is_lru;
+
                /*
                 * Periodically drop the lock (if held) regardless of its
                 * contention, to give chance to IRQs. Abort async compaction
@@ -723,36 +750,35 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 * It's possible to migrate LRU pages and balloon pages
                 * Skip any other type of page
                 */
-               if (!PageLRU(page)) {
+               is_lru = PageLRU(page);
+               if (!is_lru) {
                        if (unlikely(balloon_page_movable(page))) {
                                if (balloon_page_isolate(page)) {
                                        /* Successfully isolated */
                                        goto isolate_success;
                                }
                        }
-                       continue;
                }
 
                /*
-                * PageLRU is set. lru_lock normally excludes isolation
-                * splitting and collapsing (collapsing has already happened
-                * if PageLRU is set) but the lock is not necessarily taken
-                * here and it is wasteful to take it just to check transhuge.
-                * Check TransHuge without lock and skip the whole pageblock if
-                * it's either a transhuge or hugetlbfs page, as calling
-                * compound_order() without preventing THP from splitting the
-                * page underneath us may return surprising results.
+                * Regardless of being on LRU, compound pages such as THP and
+                * hugetlbfs are not to be compacted. We can potentially save
+                * a lot of iterations if we skip them at once. The check is
+                * racy, but we can consider only valid values and the only
+                * danger is skipping too much.
                 */
-               if (PageTransHuge(page)) {
-                       if (!locked)
-                               low_pfn = ALIGN(low_pfn + 1,
-                                               pageblock_nr_pages) - 1;
-                       else
-                               low_pfn += (1 << compound_order(page)) - 1;
+               if (PageCompound(page)) {
+                       unsigned int comp_order = compound_order(page);
+
+                       if (likely(comp_order < MAX_ORDER))
+                               low_pfn += (1UL << comp_order) - 1;
 
                        continue;
                }
 
+               if (!is_lru)
+                       continue;
+
                /*
                 * Migration will fail if an anonymous page is pinned in memory,
                 * so avoid taking lru_lock and isolating it unnecessarily in an
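Taken together, the hunk above reorders the checks: the PageLRU() result is cached in is_lru, balloon pages still get their isolation attempt first, any compound page (LRU or not) is skipped by its order, and only afterwards are the remaining non-LRU pages dropped. A schematic stand-alone restatement of that control flow (plain C with a toy page struct and stubbed predicates standing in for the kernel helpers; all names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11	/* stand-in value */

/* Toy page descriptor; the real code reads struct page flags instead. */
struct toy_page {
	bool lru;
	bool balloon_movable;
	bool compound;
	unsigned int order;
};

/*
 * Schematic version of the reordered checks.  Returns the number of extra
 * pfns to skip, or -1 when the page should be handed to isolation.
 */
static long classify(const struct toy_page *page)
{
	bool is_lru = page->lru;

	if (!is_lru && page->balloon_movable)
		return -1;			/* balloon isolate_success path */

	if (page->compound) {
		if (page->order < MAX_ORDER)
			return (1L << page->order) - 1;	/* skip whole page */
		return 0;			/* bogus order: skip nothing extra */
	}

	if (!is_lru)
		return 0;			/* plain skip, like 'continue' */

	return -1;				/* LRU page: try to isolate */
}

int main(void)
{
	struct toy_page thp = { .lru = true, .compound = true, .order = 9 };

	printf("THP skips %ld extra pfns\n", classify(&thp));
	return 0;
}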
@@ -769,11 +795,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        if (!locked)
                                break;
 
-                       /* Recheck PageLRU and PageTransHuge under lock */
+                       /* Recheck PageLRU and PageCompound under lock */
                        if (!PageLRU(page))
                                continue;
-                       if (PageTransHuge(page)) {
-                               low_pfn += (1 << compound_order(page)) - 1;
+
+                       /*
+                        * Page became compound since the non-locked check,
+                        * and it's on LRU. It can only be a THP so the order
+                        * is safe to read and it's 0 for tail pages.
+                        */
+                       if (unlikely(PageCompound(page))) {
+                               low_pfn += (1UL << compound_order(page)) - 1;
                                continue;
                        }
                }
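The comment's point about tail pages follows directly from the skip arithmetic: the extra skip is (1UL << order) - 1, which is 0 when compound_order() reads 0 for a tail page, so the worst case under the lock is simply not skipping ahead. A tiny illustration (plain C, hypothetical orders):

#include <stdio.h>

int main(void)
{
	/* Tail page: compound_order() reads 0, so no extra pfns are skipped. */
	printf("tail page skip: %lu\n", (1UL << 0) - 1);	/* 0   */
	/* THP head of order 9: the rest of the huge page is skipped at once. */
	printf("THP head skip:  %lu\n", (1UL << 9) - 1);	/* 511 */
	return 0;
}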
@@ -784,7 +816,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if (__isolate_lru_page(page, isolate_mode) != 0)
                        continue;
 
-               VM_BUG_ON_PAGE(PageTransCompound(page), page);
+               VM_BUG_ON_PAGE(PageCompound(page), page);
 
                /* Successfully isolated */
                del_page_from_lru_list(page, lruvec, page_lru(page));
@@ -1083,6 +1115,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
 {
        unsigned long low_pfn, end_pfn;
+       unsigned long isolate_start_pfn;
        struct page *page;
        const isolate_mode_t isolate_mode =
                (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
@@ -1131,6 +1164,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                        continue;
 
                /* Perform the isolation */
+               isolate_start_pfn = low_pfn;
                low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
                                                                isolate_mode);
 
@@ -1139,6 +1173,15 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                        return ISOLATE_ABORT;
                }
 
+               /*
+                * Record where we could have freed pages by migration and not
+                * yet flushed them to the buddy allocator; this is the lowest
+                * page that could have been isolated and then freed by
+                * migration.
+                */
+               if (cc->nr_migratepages && !cc->last_migrated_pfn)
+                       cc->last_migrated_pfn = isolate_start_pfn;
+
                /*
                 * Either we isolated something and proceed with migration. Or
                 * we failed and compact_zone should decide if we should
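The bookkeeping added in this hunk is deliberately simple: the first block in a compaction run that actually isolates pages supplies the candidate "lowest pfn whose pages may soon be freed", and it is not overwritten until the drain logic clears it. A stand-alone sketch of that rule (the field names mirror the diff; the cut-down struct and the pfn values are made up):

#include <stdio.h>

/* Cut-down stand-in for the kernel's struct compact_control. */
struct toy_compact_control {
	unsigned long nr_migratepages;
	unsigned long last_migrated_pfn;
};

/*
 * After each isolate_migratepages_block() call, remember the start pfn of
 * the first block that produced isolated pages; it is only cleared again
 * once those pages have been drained to the buddy allocator.
 */
static void record_migrated(struct toy_compact_control *cc,
			    unsigned long isolate_start_pfn)
{
	if (cc->nr_migratepages && !cc->last_migrated_pfn)
		cc->last_migrated_pfn = isolate_start_pfn;
}

int main(void)
{
	struct toy_compact_control cc = { .nr_migratepages = 32 };

	record_migrated(&cc, 4096);	/* first successful block wins */
	record_migrated(&cc, 8192);	/* later blocks do not overwrite it */
	printf("last_migrated_pfn = %lu\n", cc.last_migrated_pfn);
	return 0;
}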
@@ -1310,7 +1353,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        unsigned long end_pfn = zone_end_pfn(zone);
        const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
        const bool sync = cc->mode != MIGRATE_ASYNC;
-       unsigned long last_migrated_pfn = 0;
 
        ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
                                                        cc->classzone_idx);
@@ -1348,6 +1390,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }
+       cc->last_migrated_pfn = 0;
 
        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
                                cc->free_pfn, end_pfn, sync);
@@ -1357,7 +1400,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        while ((ret = compact_finished(zone, cc, migratetype)) ==
                                                COMPACT_CONTINUE) {
                int err;
-               unsigned long isolate_start_pfn = cc->migrate_pfn;
 
                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
@@ -1397,16 +1439,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                        }
                }
 
-               /*
-                * Record where we could have freed pages by migration and not
-                * yet flushed them to buddy allocator. We use the pfn that
-                * isolate_migratepages() started from in this loop iteration
-                * - this is the lowest page that could have been isolated and
-                * then freed by migration.
-                */
-               if (!last_migrated_pfn)
-                       last_migrated_pfn = isolate_start_pfn;
-
 check_drain:
                /*
                 * Has the migration scanner moved away from the previous
@@ -1415,18 +1447,18 @@ check_drain:
                 * compact_finished() can detect immediately if allocation
                 * would succeed.
                 */
-               if (cc->order > 0 && last_migrated_pfn) {
+               if (cc->order > 0 && cc->last_migrated_pfn) {
                        int cpu;
                        unsigned long current_block_start =
                                cc->migrate_pfn & ~((1UL << cc->order) - 1);
 
-                       if (last_migrated_pfn < current_block_start) {
+                       if (cc->last_migrated_pfn < current_block_start) {
                                cpu = get_cpu();
                                lru_add_drain_cpu(cpu);
                                drain_local_pages(zone);
                                put_cpu();
                                /* No more flushing until we migrate again */
-                               last_migrated_pfn = 0;
+                               cc->last_migrated_pfn = 0;
                        }
                }
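The consumer of cc->last_migrated_pfn is the drain check above: once the migration scanner has left the cc->order-aligned block containing that pfn, the per-cpu pages are flushed so compact_finished() can notice the newly freed pages. A user-space sketch of just the aligned-block comparison (hypothetical pfn values; the real code then calls lru_add_drain_cpu() and drain_local_pages()):

#include <stdio.h>

/*
 * Decide whether the migration scanner has moved past the cc->order-aligned
 * block that last produced migrated (soon-to-be-freed) pages.  In the kernel
 * a true result triggers the per-cpu drain; here we just report the decision.
 */
static int should_drain(unsigned long migrate_pfn,
			unsigned long last_migrated_pfn,
			int order)
{
	unsigned long current_block_start = migrate_pfn & ~((1UL << order) - 1);

	return last_migrated_pfn && last_migrated_pfn < current_block_start;
}

int main(void)
{
	/* Order-9 blocks: draining starts once the scanner passes pfn 1024. */
	printf("still in block:   %d\n", should_drain(1000, 600, 9));
	printf("moved past block: %d\n", should_drain(1030, 600, 9));
	return 0;
}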