mm/vmscan.c: cleanup with s/reclaim_mode/isolate_mode/
author: Hillf Danton <dhillf@gmail.com>
Wed, 21 Mar 2012 23:33:48 +0000 (16:33 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Mar 2012 00:54:55 +0000 (17:54 -0700)
With tons of reclaim_mode (defined as one field of struct scan_control)
already in the file, it is clearer to rename the local reclaim_mode when
setting up the isolation mode.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmscan.c

index c52b23552659af5bce0038dd6fa7ac10d044be40..61a66881235d44e4370759e5a7358047b3df8584 100644 (file)
@@ -1509,7 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        unsigned long nr_file;
        unsigned long nr_dirty = 0;
        unsigned long nr_writeback = 0;
-       isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
+       isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
        struct zone *zone = mz->zone;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1522,20 +1522,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
        set_reclaim_mode(priority, sc, false);
        if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-               reclaim_mode |= ISOLATE_ACTIVE;
+               isolate_mode |= ISOLATE_ACTIVE;
 
        lru_add_drain();
 
        if (!sc->may_unmap)
-               reclaim_mode |= ISOLATE_UNMAPPED;
+               isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
-               reclaim_mode |= ISOLATE_CLEAN;
+               isolate_mode |= ISOLATE_CLEAN;
 
        spin_lock_irq(&zone->lru_lock);
 
        nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list,
                                     &nr_scanned, sc->order,
-                                    reclaim_mode, 0, file);
+                                    isolate_mode, 0, file);
        if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
@@ -1699,21 +1699,21 @@ static void shrink_active_list(unsigned long nr_to_scan,
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        unsigned long nr_rotated = 0;
-       isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
+       isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
        struct zone *zone = mz->zone;
 
        lru_add_drain();
 
        if (!sc->may_unmap)
-               reclaim_mode |= ISOLATE_UNMAPPED;
+               isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
-               reclaim_mode |= ISOLATE_CLEAN;
+               isolate_mode |= ISOLATE_CLEAN;
 
        spin_lock_irq(&zone->lru_lock);
 
        nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold,
                                     &nr_scanned, sc->order,
-                                    reclaim_mode, 1, file);
+                                    isolate_mode, 1, file);
        if (global_reclaim(sc))
                zone->pages_scanned += nr_scanned;