mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
[firefly-linux-kernel-4.4.55.git] / mm / migrate.c
index 11d89dc0574c20880cd31599cca58f891ead8089..f9e16350d09c4cf6d6c280587b4995d3102d5524 100644
@@ -1599,31 +1599,38 @@ bool migrate_ratelimited(int node)
 }
 
 /* Returns true if the node is migrate rate-limited after the update */
-bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
+static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
+                                       unsigned long nr_pages)
 {
-       bool rate_limited = false;
-
        /*
         * Rate-limit the amount of data that is being migrated to a node.
         * Optimal placement is no good if the memory bus is saturated and
         * all the time is being spent migrating!
         */
-       spin_lock(&pgdat->numabalancing_migrate_lock);
        if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+               spin_lock(&pgdat->numabalancing_migrate_lock);
                pgdat->numabalancing_migrate_nr_pages = 0;
                pgdat->numabalancing_migrate_next_window = jiffies +
                        msecs_to_jiffies(migrate_interval_millisecs);
+               spin_unlock(&pgdat->numabalancing_migrate_lock);
        }
-       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
-               rate_limited = true;
-       else
-               pgdat->numabalancing_migrate_nr_pages += nr_pages;
-       spin_unlock(&pgdat->numabalancing_migrate_lock);
-       
-       return rate_limited;
+       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
+               trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
+                                                               nr_pages);
+               return true;
+       }
+
+       /*
+        * This is an unlocked non-atomic update so errors are possible.
+        * The consequences are failing to migrate when we potentially should
+        * have, which is not severe enough to warrant locking. If it is ever
+        * a problem, it can be converted to a per-cpu counter.
+        */
+       pgdat->numabalancing_migrate_nr_pages += nr_pages;
+       return false;
 }
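
Taken together, the hunk narrows the lock to the once-per-interval window reset and deliberately lets the hot-path counter update race. The window-based throttle itself is simple enough to mirror outside the kernel; the sketch below is only a userspace analogue for illustration, with made-up names and constants (a millisecond clock stands in for jiffies, and RATELIMIT_PAGES/MIGRATE_INTERVAL_MSEC stand in for the kernel tunables):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-ins for the kernel tunables; values are illustrative only. */
#define RATELIMIT_PAGES       (256UL << (20 - 12))   /* 256MB in 4K pages */
#define MIGRATE_INTERVAL_MSEC 100UL

struct node_state {
	unsigned long nr_pages;    /* pages migrated in the current window */
	uint64_t next_window;      /* window expiry, in milliseconds */
};

static uint64_t now_msec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Returns true if migrating nr_pages more should be throttled. */
static bool update_ratelimit(struct node_state *ns, unsigned long nr_pages)
{
	/* Open a fresh window once the old one has expired; only this
	 * reset needs the lock in the patched kernel code. */
	if (now_msec() > ns->next_window) {
		ns->nr_pages = 0;
		ns->next_window = now_msec() + MIGRATE_INTERVAL_MSEC;
	}

	if (ns->nr_pages > RATELIMIT_PAGES)
		return true;

	/* Unlocked, non-atomic update, mirroring the patch's tolerance
	 * for the occasional lost update. */
	ns->nr_pages += nr_pages;
	return false;
}

int main(void)
{
	struct node_state ns = { 0 };
	unsigned long i, throttled = 0;

	/* 1000 batches of 512 pages arrive faster than one window. */
	for (i = 0; i < 1000; i++)
		if (update_ratelimit(&ns, 512))
			throttled++;
	printf("throttled %lu of 1000 batches\n", throttled);
	return 0;
}
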
 
-int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int page_lru;
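
The new comment above leaves the counter update racy on purpose and notes it "can be converted to a per-cpu counter" if lost updates ever matter. Purely as illustration of that remark (none of this is in the patch, and the names here are hypothetical), such a conversion could lean on the kernel's generic percpu_counter API, which accumulates updates in per-CPU deltas and only takes its internal lock when a delta overflows:

#include <linux/percpu_counter.h>

/* Hypothetical replacement for pgdat->numabalancing_migrate_nr_pages;
 * a real conversion would embed one counter per node. */
static struct percpu_counter numab_migrate_pages;

/* Once at init: percpu_counter_init(&numab_migrate_pages, 0, GFP_KERNEL);
 * the window reset would call percpu_counter_set(&numab_migrate_pages, 0). */

static bool numab_would_throttle(unsigned long nr_pages,
				 unsigned long ratelimit_pages)
{
	/* Cheap approximate read; per-CPU deltas may lag slightly,
	 * which is fine for a rate limiter. */
	if (percpu_counter_read_positive(&numab_migrate_pages) > ratelimit_pages)
		return true;

	/* Fast path stays lock-free: the add lands in a per-CPU delta. */
	percpu_counter_add(&numab_migrate_pages, nr_pages);
	return false;
}

The trade-off is the same one the comment names: exactness is given up on reads in exchange for uncontended updates on the migration fast path.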