vmscan: handle isolated pages with lru lock released
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d7dad2a4e69ca02c38374e33b95ef20818d9d698..57d8ef6ee4dd2d69d7ce9d5ce8cdc9e4e871b61f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,7 +1413,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
                       unsigned long *nr_anon,
                       unsigned long *nr_file)
 {
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        struct zone *zone = mz->zone;
        unsigned int count[NR_LRU_LISTS] = { 0, };
        unsigned long nr_active = 0;
@@ -1434,6 +1433,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
                count[lru] += numpages;
        }
 
+       preempt_disable();
        __count_vm_events(PGDEACTIVATE, nr_active);
 
        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1448,8 +1448,9 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
        *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
        *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
 
-       reclaim_stat->recent_scanned[0] += *nr_anon;
-       reclaim_stat->recent_scanned[1] += *nr_file;
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+       __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+       preempt_enable();
 }
 
 /*
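
Note on the three hunks above: the double-underscore vmstat helpers
(__count_vm_events(), __mod_zone_page_state()) update per-cpu counters with
no protection of their own, so they are only safe with preemption or
interrupts disabled.  Previously zone->lru_lock, taken with IRQs off, covered
them; now that update_isolated_counts() runs with the lock released, an
explicit preempt_disable()/preempt_enable() pair has to.  A minimal sketch of
the pattern, not part of the patch (the helper name is hypothetical):

    /* Illustrative only: per-cpu vmstat updates once lru_lock is dropped. */
    static void account_isolated(struct zone *zone,
                                 unsigned long nr_anon,
                                 unsigned long nr_file)
    {
            preempt_disable();      /* pin this CPU for the per-cpu counters */
            __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
            __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
            preempt_enable();
    }
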
@@ -1511,6 +1512,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        unsigned long nr_writeback = 0;
        isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
        struct zone *zone = mz->zone;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1544,19 +1546,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
        }
+       spin_unlock_irq(&zone->lru_lock);
 
-       if (nr_taken == 0) {
-               spin_unlock_irq(&zone->lru_lock);
+       if (nr_taken == 0)
                return 0;
-       }
 
        update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-       __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-       __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-       spin_unlock_irq(&zone->lru_lock);
-
        nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
                                                &nr_dirty, &nr_writeback);
 
@@ -1569,6 +1565,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
        spin_lock_irq(&zone->lru_lock);
 
+       reclaim_stat->recent_scanned[0] += nr_anon;
+       reclaim_stat->recent_scanned[1] += nr_file;
+
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
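
Note on the reclaim_stat move in the two hunks above: zone_reclaim_stat is
serialised by zone->lru_lock, so once update_isolated_counts() runs with the
lock released it can no longer do the recent_scanned accounting.  The counts
are carried in nr_anon/nr_file instead and applied after shrink_page_list()
returns, at the point where the lock is re-taken.  A hedged sketch of that
accounting step in isolation (hypothetical helper; as in the patch, index 0
is anon and index 1 is file):

    /* Illustrative only: recent_scanned updates belong under lru_lock. */
    static void note_scanned(struct zone *zone,
                             struct zone_reclaim_stat *reclaim_stat,
                             unsigned long nr_anon, unsigned long nr_file)
    {
            spin_lock_irq(&zone->lru_lock);
            reclaim_stat->recent_scanned[0] += nr_anon;     /* anon */
            reclaim_stat->recent_scanned[1] += nr_file;     /* file */
            spin_unlock_irq(&zone->lru_lock);
    }
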
@@ -1642,18 +1641,6 @@ static void move_active_pages_to_lru(struct zone *zone,
        unsigned long pgmoved = 0;
        struct page *page;
 
-       if (buffer_heads_over_limit) {
-               spin_unlock_irq(&zone->lru_lock);
-               list_for_each_entry(page, list, lru) {
-                       if (page_has_private(page) && trylock_page(page)) {
-                               if (page_has_private(page))
-                                       try_to_release_page(page, 0);
-                               unlock_page(page);
-                       }
-               }
-               spin_lock_irq(&zone->lru_lock);
-       }
-
        while (!list_empty(list)) {
                struct lruvec *lruvec;
 
@@ -1735,6 +1722,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
                        continue;
                }
 
+               if (unlikely(buffer_heads_over_limit)) {
+                       if (page_has_private(page) && trylock_page(page)) {
+                               if (page_has_private(page))
+                                       try_to_release_page(page, 0);
+                               unlock_page(page);
+                       }
+               }
+
                if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
                        nr_rotated += hpage_nr_pages(page);
                        /*
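
For the two buffer_heads hunks above: the release work leaves
move_active_pages_to_lru(), where it had to drop and re-take zone->lru_lock,
and moves into shrink_active_list(), where the pages sit on a private list
with no LRU lock held, so trylock_page() can be used directly.  The repeated
page_has_private() test is intentional, as the sketch below spells out (same
calls as the hunk, comments added for illustration):

    if (page_has_private(page) && trylock_page(page)) {
            /*
             * Re-check under the page lock: the buffers may have been
             * stripped between the unlocked check and the trylock.
             */
            if (page_has_private(page))
                    try_to_release_page(page, 0);
            unlock_page(page);
    }
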
@@ -2198,7 +2193,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
         * If compaction is deferred, reclaim up to a point where
         * compaction will have a chance of success when re-enabled
         */
-       if (compaction_deferred(zone))
+       if (compaction_deferred(zone, sc->order))
                return watermark_ok;
 
        /* If compaction is not ready to start, keep reclaiming */
@@ -2238,6 +2233,14 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
        unsigned long nr_soft_scanned;
        bool aborted_reclaim = false;
 
+       /*
+        * If the number of buffer_heads in the machine exceeds the maximum
+        * allowed level, force direct reclaim to scan the highmem zone as
+        * highmem pages could be pinning lowmem pages storing buffer_heads
+        */
+       if (buffer_heads_over_limit)
+               sc->gfp_mask |= __GFP_HIGHMEM;
+
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
                if (!populated_zone(zone))
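
Why OR-ing __GFP_HIGHMEM into sc->gfp_mask is sufficient here: the
for_each_zone_zonelist_nodemask() walk in the hunk above only visits zones up
to the index returned by gfp_zone(sc->gfp_mask), so widening the mask widens
the walk to include highmem.  A minimal sketch, assuming CONFIG_HIGHMEM and a
hypothetical helper name:

    /* Illustrative only: widening the gfp mask raises the highest zone
     * index that the reclaim walk may visit.
     */
    static enum zone_type reclaim_zone_limit(gfp_t gfp_mask, bool scan_highmem)
    {
            if (scan_highmem)
                    gfp_mask |= __GFP_HIGHMEM;   /* as the hunk above does */

            /* ZONE_HIGHMEM when highmem is allowed, a lower zone otherwise */
            return gfp_zone(gfp_mask);
    }
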
@@ -2727,6 +2730,17 @@ loop_again:
                         */
                        age_active_anon(zone, &sc, priority);
 
+                       /*
+                        * If the number of buffer_heads in the machine
+                        * exceeds the maximum allowed level and this node
+                        * has a highmem zone, force kswapd to reclaim from
+                        * it to relieve lowmem pressure.
+                        */
+                       if (buffer_heads_over_limit && is_highmem_idx(i)) {
+                               end_zone = i;
+                               break;
+                       }
+
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
@@ -2802,7 +2816,8 @@ loop_again:
                                                COMPACT_SKIPPED)
                                testorder = 0;
 
-                       if (!zone_watermark_ok_safe(zone, testorder,
+                       if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
+                           !zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone) + balance_gap,
                                        end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
@@ -2919,6 +2934,8 @@ out:
         * and it is potentially going to sleep here.
         */
        if (order) {
+               int zones_need_compaction = 1;
+
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
@@ -2939,9 +2956,17 @@ out:
                                goto loop_again;
                        }
 
+                       /* Check if the memory needs to be defragmented. */
+                       if (zone_watermark_ok(zone, order,
+                                   low_wmark_pages(zone), *classzone_idx, 0))
+                               zones_need_compaction = 0;
+
                        /* If balanced, clear the congested flag */
                        zone_clear_flag(zone, ZONE_CONGESTED);
                }
+
+               if (zones_need_compaction)
+                       compact_pgdat(pgdat, order);
        }
 
        /*