Merge tag 'llvmlinux-for-v3.15' of git://git.linuxfoundation.org/llvmlinux/kernel
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2a0bb8fdb259dcd9d73194d136a59ffa0464d585..9b6497eda8067857d7008787868e365882820db3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -523,7 +523,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
  * Same as remove_mapping, but if the page is removed from the mapping, it
  * gets returned with a refcount of 0.
  */
-static int __remove_mapping(struct address_space *mapping, struct page *page)
+static int __remove_mapping(struct address_space *mapping, struct page *page,
+                           bool reclaimed)
 {
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ -569,10 +570,23 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                swapcache_free(swap, page);
        } else {
                void (*freepage)(struct page *);
+               void *shadow = NULL;
 
                freepage = mapping->a_ops->freepage;
-
-               __delete_from_page_cache(page, NULL);
+               /*
+                * Remember a shadow entry for reclaimed file cache in
+                * order to detect refaults, thus thrashing, later on.
+                *
+                * But don't store shadows in an address space that is
+                * already exiting.  This is not just an optimization,
+                * inode reclaim needs to empty out the radix tree or
+                * the nodes are lost.  Don't plant shadows behind its
+                * back.
+                */
+               if (reclaimed && page_is_file_cache(page) &&
+                   !mapping_exiting(mapping))
+                       shadow = workingset_eviction(mapping, page);
+               __delete_from_page_cache(page, shadow);
                spin_unlock_irq(&mapping->tree_lock);
                mem_cgroup_uncharge_cache_page(page);
 
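The added comment above describes the mechanism this hunk hooks up: when a file page is reclaimed, workingset_eviction() leaves a shadow entry in place of the page so that a later refault can be recognized as thrashing. The details live outside this file; the sketch below is only a simplified, standalone model of the refault-distance idea, with made-up names (shadow_pack, shadow_refault, eviction_clock, cache_capacity) that are not part of the kernel API.

#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal model of the shadow-entry idea (NOT the kernel implementation):
 * on eviction, remember the current "eviction clock" as a shadow value;
 * on refault, the distance between now and the shadow tells how far
 * outside the in-memory working set the page was.
 */
static unsigned long eviction_clock;          /* advances once per eviction */
static unsigned long cache_capacity = 1024;   /* pages the cache can hold   */

/* Called when a page is evicted: produce the shadow value to plant. */
static unsigned long shadow_pack(void)
{
        return ++eviction_clock;
}

/* Called when the page is faulted back in: was it evicted "recently"? */
static bool shadow_refault(unsigned long shadow)
{
        unsigned long refault_distance = eviction_clock - shadow;

        /*
         * If fewer evictions happened than the cache can hold, the page
         * would still have been resident had the cache been slightly
         * larger: treat the refault as thrashing.
         */
        return refault_distance < cache_capacity;
}

int main(void)
{
        unsigned long shadow = shadow_pack();   /* page evicted */

        for (int i = 0; i < 100; i++)           /* some other evictions */
                shadow_pack();

        printf("thrashing refault: %s\n", shadow_refault(shadow) ? "yes" : "no");
        return 0;
}
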
@@ -595,7 +609,7 @@ cannot_free:
  */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
-       if (__remove_mapping(mapping, page)) {
+       if (__remove_mapping(mapping, page, false)) {
                /*
                 * Unfreezing the refcount with 1 rather than 2 effectively
                 * drops the pagecache ref for us without requiring another
@@ -1065,7 +1079,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        }
                }
 
-               if (!mapping || !__remove_mapping(mapping, page))
+               if (!mapping || !__remove_mapping(mapping, page, true))
                        goto keep_locked;
 
                /*
@@ -1848,7 +1862,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
        struct zone *zone = lruvec_zone(lruvec);
        unsigned long anon_prio, file_prio;
        enum scan_balance scan_balance;
-       unsigned long anon, file, free;
+       unsigned long anon, file;
        bool force_scan = false;
        unsigned long ap, fp;
        enum lru_list lru;
@@ -1901,20 +1915,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
        file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
                get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
-       /*
-        * If it's foreseeable that reclaiming the file cache won't be
-        * enough to get the zone back into a desirable shape, we have
-        * to swap.  Better start now and leave the - probably heavily
-        * thrashing - remaining file pages alone.
-        */
-       if (global_reclaim(sc)) {
-               free = zone_page_state(zone, NR_FREE_PAGES);
-               if (unlikely(file + free <= high_wmark_pages(zone))) {
-                       scan_balance = SCAN_ANON;
-                       goto out;
-               }
-       }
-
        /*
         * There is enough inactive page cache, do not reclaim
         * anything from the anonymous working set right now.
@@ -2300,15 +2300,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
        unsigned long lru_pages = 0;
        bool aborted_reclaim = false;
        struct reclaim_state *reclaim_state = current->reclaim_state;
+       gfp_t orig_mask;
        struct shrink_control shrink = {
                .gfp_mask = sc->gfp_mask,
        };
+       enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
 
        /*
         * If the number of buffer_heads in the machine exceeds the maximum
         * allowed level, force direct reclaim to scan the highmem zone as
         * highmem pages could be pinning lowmem pages storing buffer_heads
         */
+       orig_mask = sc->gfp_mask;
        if (buffer_heads_over_limit)
                sc->gfp_mask |= __GFP_HIGHMEM;
 
@@ -2342,7 +2345,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                                 * noticeable problem, like transparent huge
                                 * page allocations.
                                 */
-                               if (compaction_ready(zone, sc)) {
+                               if ((zonelist_zone_idx(z) <= requested_highidx)
+                                   && compaction_ready(zone, sc)) {
                                        aborted_reclaim = true;
                                        continue;
                                }
@@ -2379,6 +2383,12 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                }
        }
 
+       /*
+        * Restore the original gfp mask so the caller is not affected if
+        * we promoted it to __GFP_HIGHMEM above.
+        */
+       sc->gfp_mask = orig_mask;
+
        return aborted_reclaim;
 }
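
The last three hunks belong to one logical change: shrink_zones() may temporarily widen sc->gfp_mask with __GFP_HIGHMEM when buffer_heads_over_limit forces highmem scanning, so the original mask is saved up front and restored before returning, and the compaction-ready early abort is only taken for zones the original request could actually use (zonelist_zone_idx(z) <= requested_highidx). Below is a minimal standalone sketch of that save/promote/restore pattern, not kernel code; struct scan_ctl, scan_zones() and the flag constants are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_LOWMEM   0x1
#define FLAG_HIGHMEM  0x2               /* stand-in for __GFP_HIGHMEM */

/* Hypothetical scan context, loosely modeled on struct scan_control. */
struct scan_ctl {
        unsigned int flags;
};

static bool over_limit = true;          /* models buffer_heads_over_limit */

static void do_scan(const struct scan_ctl *sc)
{
        printf("scanning with flags 0x%x\n", sc->flags);
}

/*
 * Widen the mask only for the duration of the scan, then restore it so
 * the caller never observes the promotion -- the same shape as the
 * orig_mask save/restore added to shrink_zones() above.
 */
static void scan_zones(struct scan_ctl *sc)
{
        unsigned int orig_flags = sc->flags;

        if (over_limit)
                sc->flags |= FLAG_HIGHMEM;

        do_scan(sc);

        sc->flags = orig_flags;
}

int main(void)
{
        struct scan_ctl sc = { .flags = FLAG_LOWMEM };

        scan_zones(&sc);
        printf("caller still sees flags 0x%x\n", sc.flags);
        return 0;
}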