mm: tail page refcounting optimization for slab and hugetlbfs
1 /*
2  *  linux/mm/swap.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * This file contains the default values for the operation of the
9  * Linux VM subsystem. Fine-tuning documentation can be found in
10  * Documentation/sysctl/vm.txt.
11  * Started 18.12.91
12  * Swap aging added 23.2.95, Stephen Tweedie.
13  * Buffermem limits added 12.3.98, Rik van Riel.
14  */
15
16 #include <linux/mm.h>
17 #include <linux/sched.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/swap.h>
20 #include <linux/mman.h>
21 #include <linux/pagemap.h>
22 #include <linux/pagevec.h>
23 #include <linux/init.h>
24 #include <linux/export.h>
25 #include <linux/mm_inline.h>
26 #include <linux/percpu_counter.h>
27 #include <linux/percpu.h>
28 #include <linux/cpu.h>
29 #include <linux/notifier.h>
30 #include <linux/backing-dev.h>
31 #include <linux/memcontrol.h>
32 #include <linux/gfp.h>
33 #include <linux/uio.h>
34 #include <linux/hugetlb.h>
35
36 #include "internal.h"
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/pagemap.h>
40
41 /* How many pages do we try to swap or page in/out together? */
42 int page_cluster;
43
44 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
45 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
46 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
47
48 /*
49  * This path almost never happens for VM activity - pages are normally
50  * freed via pagevecs.  But it gets used by networking.
51  */
52 static void __page_cache_release(struct page *page)
53 {
54         if (PageLRU(page)) {
55                 struct zone *zone = page_zone(page);
56                 struct lruvec *lruvec;
57                 unsigned long flags;
58
59                 spin_lock_irqsave(&zone->lru_lock, flags);
60                 lruvec = mem_cgroup_page_lruvec(page, zone);
61                 VM_BUG_ON(!PageLRU(page));
62                 __ClearPageLRU(page);
63                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
64                 spin_unlock_irqrestore(&zone->lru_lock, flags);
65         }
66 }
67
68 static void __put_single_page(struct page *page)
69 {
70         __page_cache_release(page);
71         free_hot_cold_page(page, 0);
72 }
73
74 static void __put_compound_page(struct page *page)
75 {
76         compound_page_dtor *dtor;
77
78         __page_cache_release(page);
79         dtor = get_compound_page_dtor(page);
80         (*dtor)(page);
81 }
82
83 static void put_compound_page(struct page *page)
84 {
85         if (unlikely(PageTail(page))) {
86                 /* __split_huge_page_refcount can run under us */
87                 struct page *page_head = compound_trans_head(page);
88
89                 /*
90                  * THP cannot break up slab pages, so avoid taking
91                  * compound_lock() and skip the tail page refcounting
92                  * (in _mapcount) too. Slab performs non-atomic bit
93                  * ops on page->flags for better performance. In
94                  * particular slab_unlock() in slub used to be a hot
95                  * path. It is still hot on arches that do not support
96                  * this_cpu_cmpxchg_double().
97                  *
98                  * If "page" is part of a slab or hugetlbfs page it
99                  * cannot be split and the head page cannot change
100                  * from under us. And if "page" is part of a THP
101                  * undergoing a split, and the head page pointed to
102                  * by the THP tail is no longer a THP head, we'll find
103                  * PageTail clear after smp_rmb() and treat it as a
104                  * single page.
105                  */
106                 if (!__compound_tail_refcounted(page_head)) {
107                         /*
108                          * If "page" is a THP tail, we must read the tail page
109                          * flags after the head page flags. The
110                          * split_huge_page side enforces write memory
111                  * barriers between clearing PageTail and the point
112                  * where the head page can be freed and reallocated.
113                          */
114                         smp_rmb();
115                         if (likely(PageTail(page))) {
116                                 /*
117                                  * __split_huge_page_refcount
118                                  * cannot race here.
119                                  */
120                                 VM_BUG_ON(!PageHead(page_head));
121                                 VM_BUG_ON(page_mapcount(page) != 0);
122                                 if (put_page_testzero(page_head)) {
123                                         /*
124                                          * If this is the tail of a
125                                          * slab compound page, the
126                                          * tail pin must not be the
127                                          * last reference held on the
128                                          * page, because PG_slab
129                                          * cannot be cleared before
130                                          * all tail pins (which skip
131                                          * the _mapcount tail
132                                          * refcounting) have been
133                                          * released. For hugetlbfs the
134                                          * tail pin may be the last
135                                          * reference on the page
136                                          * instead, because
137                                          * PageHeadHuge will not go
138                                          * away until the compound
139                                          * page enters the buddy
140                                          * allocator.
141                                          */
142                                         VM_BUG_ON(PageSlab(page_head));
143                                         __put_compound_page(page_head);
144                                 }
145                                 return;
146                         } else
147                                 /*
148                                  * __split_huge_page_refcount
149                                  * ran before us, "page" was a
150                                  * THP tail. The split
151                                  * page_head has been freed
152                                  * and reallocated as a slab
153                                  * or hugetlbfs page of smaller
154                                  * order (only possible if
155                                  * reallocated as slab on
156                                  * x86).
157                                  */
158                                 goto out_put_single;
159                 }
160
161                 if (likely(page != page_head &&
162                            get_page_unless_zero(page_head))) {
163                         unsigned long flags;
164
165                         /*
166                          * page_head wasn't a dangling pointer but it
167                          * may not be a head page anymore by the time
168                          * we obtain the lock. That is ok as long as it
169                          * can't be freed from under us.
170                          */
171                         flags = compound_lock_irqsave(page_head);
172                         if (unlikely(!PageTail(page))) {
173                                 /* __split_huge_page_refcount ran before us */
174                                 compound_unlock_irqrestore(page_head, flags);
175                                 if (put_page_testzero(page_head)) {
176                                         /*
177                                          * The head page may have been
178                                          * freed and reallocated as a
179                                          * compound page of smaller
180                                          * order and then freed again.
181                                          * All we know is that it
182                                          * cannot have become: a THP
183                                          * page, a compound page of
184                                          * higher order, a tail page.
185                                          * That is because we still
186                                          * hold the refcount of the
187                                          * split THP tail and
188                                          * page_head was the THP head
189                                          * before the split.
190                                          */
191                                         if (PageHead(page_head))
192                                                 __put_compound_page(page_head);
193                                         else
194                                                 __put_single_page(page_head);
195                                 }
196 out_put_single:
197                                 if (put_page_testzero(page))
198                                         __put_single_page(page);
199                                 return;
200                         }
201                         VM_BUG_ON(page_head != page->first_page);
202                         /*
203                          * We can release the refcount taken by
204                          * get_page_unless_zero() now that
205                          * __split_huge_page_refcount() is blocked on
206                          * the compound_lock.
207                          */
208                         if (put_page_testzero(page_head))
209                                 VM_BUG_ON(1);
210                         /* __split_huge_page_refcount will wait now */
211                         VM_BUG_ON(page_mapcount(page) <= 0);
212                         atomic_dec(&page->_mapcount);
213                         VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
214                         VM_BUG_ON(atomic_read(&page->_count) != 0);
215                         compound_unlock_irqrestore(page_head, flags);
216
217                         if (put_page_testzero(page_head)) {
218                                 if (PageHead(page_head))
219                                         __put_compound_page(page_head);
220                                 else
221                                         __put_single_page(page_head);
222                         }
223                 } else {
224                         /* page_head is a dangling pointer */
225                         VM_BUG_ON(PageTail(page));
226                         goto out_put_single;
227                 }
228         } else if (put_page_testzero(page)) {
229                 if (PageHead(page))
230                         __put_compound_page(page);
231                 else
232                         __put_single_page(page);
233         }
234 }
235
236 void put_page(struct page *page)
237 {
238         if (unlikely(PageCompound(page)))
239                 put_compound_page(page);
240         else if (put_page_testzero(page))
241                 __put_single_page(page);
242 }
243 EXPORT_SYMBOL(put_page);
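/*
 * Illustrative sketch of a typical caller (hypothetical, for clarity only):
 * a transient extra reference is paired with put_page(), which routes
 * compound pages through put_compound_page() above.
 *
 *	get_page(page);
 *	... access the page while holding the extra reference ...
 *	put_page(page);		(may free the page if this was the last reference)
 */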
244
245 /*
246  * This function is exported but must not be called by anything other
247  * than get_page(). It implements the slow path of get_page().
248  */
249 bool __get_page_tail(struct page *page)
250 {
251         /*
252          * This takes care of get_page() if run on a tail page
253          * returned by one of the get_user_pages/follow_page variants.
254          * get_user_pages/follow_page itself doesn't need the compound
255          * lock because it runs __get_page_tail_foll() under the
256          * proper PT lock that already serializes against
257          * split_huge_page().
258          */
259         unsigned long flags;
260         bool got;
261         struct page *page_head = compound_trans_head(page);
262
263         /* See the comment in put_compound_page(). */
264         if (PageSlab(page_head) || PageHeadHuge(page_head)) {
265                 smp_rmb();
266                 if (likely(PageTail(page))) {
267                         /*
268                          * This is a hugetlbfs page or a slab
269                          * page. __split_huge_page_refcount
270                          * cannot race here.
271                          */
272                         VM_BUG_ON(!PageHead(page_head));
273                         __get_page_tail_foll(page, true);
274                         return true;
275                 } else {
276                         /*
277                          * __split_huge_page_refcount ran
278                          * before us, "page" was a THP
279                          * tail. The split page_head has been
280                          * freed and reallocated as a slab or
281                          * hugetlbfs page of smaller order
282                          * (only possible if reallocated as
283                          * slab on x86).
284                          */
285                         return false;
286                 }
287         }
288
289         got = false;
290         if (likely(page != page_head && get_page_unless_zero(page_head))) {
291                 /*
292                  * page_head wasn't a dangling pointer but it
293                  * may not be a head page anymore by the time
294                  * we obtain the lock. That is ok as long as it
295                  * can't be freed from under us.
296                  */
297                 flags = compound_lock_irqsave(page_head);
298                 /* here __split_huge_page_refcount won't run anymore */
299                 if (likely(PageTail(page))) {
300                         __get_page_tail_foll(page, false);
301                         got = true;
302                 }
303                 compound_unlock_irqrestore(page_head, flags);
304                 if (unlikely(!got))
305                         put_page(page_head);
306         }
307         return got;
308 }
309 EXPORT_SYMBOL(__get_page_tail);
310
311 /**
312  * put_pages_list() - release a list of pages
313  * @pages: list of pages threaded on page->lru
314  *
315  * Release a list of pages which are strung together on page->lru.  Currently
316  * used by read_cache_pages() and related error recovery code.
317  */
318 void put_pages_list(struct list_head *pages)
319 {
320         while (!list_empty(pages)) {
321                 struct page *victim;
322
323                 victim = list_entry(pages->prev, struct page, lru);
324                 list_del(&victim->lru);
325                 page_cache_release(victim);
326         }
327 }
328 EXPORT_SYMBOL(put_pages_list);
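/*
 * Illustrative sketch (hypothetical caller): pages collected on a private
 * list, e.g. in an error path, can be released in one call.
 *
 *	LIST_HEAD(pages_to_drop);
 *	... list_add(&page->lru, &pages_to_drop) for each unwanted page ...
 *	put_pages_list(&pages_to_drop);		(drops one reference per page)
 */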
329
330 /*
331  * get_kernel_pages() - pin kernel pages in memory
332  * @kiov:       An array of struct kvec structures
333  * @nr_segs:    number of segments to pin
334  * @write:      pinning for read/write, currently ignored
335  * @pages:      array that receives pointers to the pages pinned.
336  *              Should be at least nr_segs long.
337  *
338  * Returns the number of pages pinned. This may be fewer than the number
339  * requested. If nr_segs is 0 or negative, returns 0. If no pages
340  * were pinned, returns -errno. Each page returned must be released
341  * with a put_page() call when it is finished with.
342  */
343 int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
344                 struct page **pages)
345 {
346         int seg;
347
348         for (seg = 0; seg < nr_segs; seg++) {
349                 if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
350                         return seg;
351
352                 pages[seg] = kmap_to_page(kiov[seg].iov_base);
353                 page_cache_get(pages[seg]);
354         }
355
356         return seg;
357 }
358 EXPORT_SYMBOL_GPL(get_kernel_pages);
359
360 /*
361  * get_kernel_page() - pin a kernel page in memory
362  * @start:      starting kernel address
363  * @write:      pinning for read/write, currently ignored
364  * @pages:      array that receives a pointer to the page pinned.
365  *              Must have room for at least one page.
366  *
367  * Returns 1 if page is pinned. If the page was not pinned, returns
368  * -errno. The page returned must be released with a put_page() call
369  * when it is finished with.
370  */
371 int get_kernel_page(unsigned long start, int write, struct page **pages)
372 {
373         const struct kvec kiov = {
374                 .iov_base = (void *)start,
375                 .iov_len = PAGE_SIZE
376         };
377
378         return get_kernel_pages(&kiov, 1, write, pages);
379 }
380 EXPORT_SYMBOL_GPL(get_kernel_page);
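/*
 * Illustrative sketch (hypothetical caller): pinning the single page that
 * backs a page-sized kernel buffer before handing it to code that wants a
 * struct page. Only whole pages can be pinned this way, since the kvec
 * built above always has iov_len == PAGE_SIZE.
 *
 *	struct page *page;
 *
 *	if (get_kernel_page(addr, 0, &page) == 1) {
 *		... use page ...
 *		put_page(page);
 *	}
 */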
381
382 static void pagevec_lru_move_fn(struct pagevec *pvec,
383         void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
384         void *arg)
385 {
386         int i;
387         struct zone *zone = NULL;
388         struct lruvec *lruvec;
389         unsigned long flags = 0;
390
391         for (i = 0; i < pagevec_count(pvec); i++) {
392                 struct page *page = pvec->pages[i];
393                 struct zone *pagezone = page_zone(page);
394
395                 if (pagezone != zone) {
396                         if (zone)
397                                 spin_unlock_irqrestore(&zone->lru_lock, flags);
398                         zone = pagezone;
399                         spin_lock_irqsave(&zone->lru_lock, flags);
400                 }
401
402                 lruvec = mem_cgroup_page_lruvec(page, zone);
403                 (*move_fn)(page, lruvec, arg);
404         }
405         if (zone)
406                 spin_unlock_irqrestore(&zone->lru_lock, flags);
407         release_pages(pvec->pages, pvec->nr, pvec->cold);
408         pagevec_reinit(pvec);
409 }
410
411 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
412                                  void *arg)
413 {
414         int *pgmoved = arg;
415
416         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
417                 enum lru_list lru = page_lru_base_type(page);
418                 list_move_tail(&page->lru, &lruvec->lists[lru]);
419                 (*pgmoved)++;
420         }
421 }
422
423 /*
424  * pagevec_move_tail() must be called with IRQ disabled.
425  * Otherwise this may cause nasty races.
426  */
427 static void pagevec_move_tail(struct pagevec *pvec)
428 {
429         int pgmoved = 0;
430
431         pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
432         __count_vm_events(PGROTATED, pgmoved);
433 }
434
435 /*
436  * Writeback is about to end against a page which has been marked for immediate
437  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
438  * inactive list.
439  */
440 void rotate_reclaimable_page(struct page *page)
441 {
442         if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
443             !PageUnevictable(page) && PageLRU(page)) {
444                 struct pagevec *pvec;
445                 unsigned long flags;
446
447                 page_cache_get(page);
448                 local_irq_save(flags);
449                 pvec = &__get_cpu_var(lru_rotate_pvecs);
450                 if (!pagevec_add(pvec, page))
451                         pagevec_move_tail(pvec);
452                 local_irq_restore(flags);
453         }
454 }
455
456 static void update_page_reclaim_stat(struct lruvec *lruvec,
457                                      int file, int rotated)
458 {
459         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
460
461         reclaim_stat->recent_scanned[file]++;
462         if (rotated)
463                 reclaim_stat->recent_rotated[file]++;
464 }
465
466 static void __activate_page(struct page *page, struct lruvec *lruvec,
467                             void *arg)
468 {
469         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
470                 int file = page_is_file_cache(page);
471                 int lru = page_lru_base_type(page);
472
473                 del_page_from_lru_list(page, lruvec, lru);
474                 SetPageActive(page);
475                 lru += LRU_ACTIVE;
476                 add_page_to_lru_list(page, lruvec, lru);
477                 trace_mm_lru_activate(page, page_to_pfn(page));
478
479                 __count_vm_event(PGACTIVATE);
480                 update_page_reclaim_stat(lruvec, file, 1);
481         }
482 }
483
484 #ifdef CONFIG_SMP
485 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
486
487 static void activate_page_drain(int cpu)
488 {
489         struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
490
491         if (pagevec_count(pvec))
492                 pagevec_lru_move_fn(pvec, __activate_page, NULL);
493 }
494
495 static bool need_activate_page_drain(int cpu)
496 {
497         return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
498 }
499
500 void activate_page(struct page *page)
501 {
502         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
503                 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
504
505                 page_cache_get(page);
506                 if (!pagevec_add(pvec, page))
507                         pagevec_lru_move_fn(pvec, __activate_page, NULL);
508                 put_cpu_var(activate_page_pvecs);
509         }
510 }
511
512 #else
513 static inline void activate_page_drain(int cpu)
514 {
515 }
516
517 static bool need_activate_page_drain(int cpu)
518 {
519         return false;
520 }
521
522 void activate_page(struct page *page)
523 {
524         struct zone *zone = page_zone(page);
525
526         spin_lock_irq(&zone->lru_lock);
527         __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
528         spin_unlock_irq(&zone->lru_lock);
529 }
530 #endif
531
532 static void __lru_cache_activate_page(struct page *page)
533 {
534         struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
535         int i;
536
537         /*
538          * Search backwards on the optimistic assumption that the page being
539          * activated has just been added to this pagevec. Note that only
540          * the local pagevec is examined as a !PageLRU page could be in the
541          * process of being released, reclaimed, migrated or on a remote
542          * pagevec that is currently being drained. Furthermore, marking
543          * a remote pagevec's page PageActive potentially hits a race where
544          * a page is marked PageActive just after it is added to the inactive
545          * list causing accounting errors and BUG_ON checks to trigger.
546          */
547         for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
548                 struct page *pagevec_page = pvec->pages[i];
549
550                 if (pagevec_page == page) {
551                         SetPageActive(page);
552                         break;
553                 }
554         }
555
556         put_cpu_var(lru_add_pvec);
557 }
558
559 /*
560  * Mark a page as having seen activity.
561  *
562  * inactive,unreferenced        ->      inactive,referenced
563  * inactive,referenced          ->      active,unreferenced
564  * active,unreferenced          ->      active,referenced
565  */
566 void mark_page_accessed(struct page *page)
567 {
568         if (!PageActive(page) && !PageUnevictable(page) &&
569                         PageReferenced(page)) {
570
571                 /*
572                  * If the page is on the LRU, queue it for activation via
573                  * activate_page_pvecs. Otherwise, assume the page is on a
574                  * pagevec, mark it active and it'll be moved to the active
575                  * LRU on the next drain.
576                  */
577                 if (PageLRU(page))
578                         activate_page(page);
579                 else
580                         __lru_cache_activate_page(page);
581                 ClearPageReferenced(page);
582         } else if (!PageReferenced(page)) {
583                 SetPageReferenced(page);
584         }
585 }
586 EXPORT_SYMBOL(mark_page_accessed);
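/*
 * Illustrative walk through the state machine above (hypothetical page that
 * starts on the inactive list, unreferenced):
 *
 *	mark_page_accessed(page);	inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page);	inactive,referenced   -> active,unreferenced
 *
 * i.e. a page needs two accesses before it is activated (or queued for
 * activation on the next pagevec drain).
 */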
587
588 /*
589  * Queue the page for addition to the LRU via pagevec. The decision on whether
590  * to add the page to the [in]active [file|anon] list is deferred until the
591  * pagevec is drained. This gives the caller of __lru_cache_add() a chance to
592  * have the page added to the active list using mark_page_accessed().
593  */
594 void __lru_cache_add(struct page *page)
595 {
596         struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
597
598         page_cache_get(page);
599         if (!pagevec_space(pvec))
600                 __pagevec_lru_add(pvec);
601         pagevec_add(pvec, page);
602         put_cpu_var(lru_add_pvec);
603 }
604 EXPORT_SYMBOL(__lru_cache_add);
605
606 /**
607  * lru_cache_add - add a page to the LRU list
608  * @page: the page to be added to the LRU.
609  */
610 void lru_cache_add(struct page *page)
611 {
612         VM_BUG_ON(PageActive(page) && PageUnevictable(page));
613         VM_BUG_ON(PageLRU(page));
614         __lru_cache_add(page);
615 }
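/*
 * Illustrative sketch (hedged; the real call sites live outside this file):
 * new page cache pages normally reach the LRU via a pairing like the one
 * below, which is roughly what helpers such as add_to_page_cache_lru() do.
 *
 *	if (!add_to_page_cache(page, mapping, index, GFP_KERNEL))
 *		lru_cache_add(page);
 */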
616
617 /**
618  * add_page_to_unevictable_list - add a page to the unevictable list
619  * @page:  the page to be added to the unevictable list
620  *
621  * Add page directly to its zone's unevictable list.  To avoid races with
622  * tasks that might be making the page evictable (through e.g. munlock,
623  * munmap or exit) while it's not on the LRU, we want to add the page
624  * while it's locked or otherwise "invisible" to other tasks.  This is
625  * difficult to do when using the pagevec cache, so bypass that.
626  */
627 void add_page_to_unevictable_list(struct page *page)
628 {
629         struct zone *zone = page_zone(page);
630         struct lruvec *lruvec;
631
632         spin_lock_irq(&zone->lru_lock);
633         lruvec = mem_cgroup_page_lruvec(page, zone);
634         ClearPageActive(page);
635         SetPageUnevictable(page);
636         SetPageLRU(page);
637         add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
638         spin_unlock_irq(&zone->lru_lock);
639 }
640
641 /*
642  * If the page cannot be invalidated, it is moved to the
643  * inactive list to speed up its reclaim.  It is moved to the
644  * head of the list, rather than the tail, to give the flusher
645  * threads some time to write it out, as this is much more
646  * effective than the single-page writeout from reclaim.
647  *
648  * If the page isn't mapped but is dirty or under writeback, it can be
649  * reclaimed ASAP by setting PG_reclaim.
650  *
651  * 1. active, mapped page -> none
652  * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
653  * 3. inactive, mapped page -> none
654  * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
655  * 5. inactive, clean -> inactive, tail
656  * 6. Others -> none
657  *
658  * In case 4, the page is moved to the head of the inactive list because
659  * the VM expects it to be written out by the flusher threads, which is
660  * much more effective than single-page writeout from reclaim.
661  */
662 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
663                               void *arg)
664 {
665         int lru, file;
666         bool active;
667
668         if (!PageLRU(page))
669                 return;
670
671         if (PageUnevictable(page))
672                 return;
673
674         /* Some processes are using the page */
675         if (page_mapped(page))
676                 return;
677
678         active = PageActive(page);
679         file = page_is_file_cache(page);
680         lru = page_lru_base_type(page);
681
682         del_page_from_lru_list(page, lruvec, lru + active);
683         ClearPageActive(page);
684         ClearPageReferenced(page);
685         add_page_to_lru_list(page, lruvec, lru);
686
687         if (PageWriteback(page) || PageDirty(page)) {
688                 /*
689                  * PG_reclaim can race with end_page_writeback(),
690                  * which can confuse readahead.  But the race window
691                  * is _really_ small and it's a non-critical problem.
692                  */
693                 SetPageReclaim(page);
694         } else {
695                 /*
696                  * The page's writeback ended while it was on the pagevec,
697                  * so move the page to the tail of the inactive list.
698                  */
699                 list_move_tail(&page->lru, &lruvec->lists[lru]);
700                 __count_vm_event(PGROTATED);
701         }
702
703         if (active)
704                 __count_vm_event(PGDEACTIVATE);
705         update_page_reclaim_stat(lruvec, file, 0);
706 }
707
708 /*
709  * Drain pages out of the cpu's pagevecs.
710  * Either "cpu" is the current CPU, and preemption has already been
711  * disabled; or "cpu" is being hot-unplugged, and is already dead.
712  */
713 void lru_add_drain_cpu(int cpu)
714 {
715         struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
716
717         if (pagevec_count(pvec))
718                 __pagevec_lru_add(pvec);
719
720         pvec = &per_cpu(lru_rotate_pvecs, cpu);
721         if (pagevec_count(pvec)) {
722                 unsigned long flags;
723
724                 /* No harm done if a racing interrupt already did this */
725                 local_irq_save(flags);
726                 pagevec_move_tail(pvec);
727                 local_irq_restore(flags);
728         }
729
730         pvec = &per_cpu(lru_deactivate_pvecs, cpu);
731         if (pagevec_count(pvec))
732                 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
733
734         activate_page_drain(cpu);
735 }
736
737 /**
738  * deactivate_page - forcefully deactivate a page
739  * @page: page to deactivate
740  *
741  * This function hints the VM that @page is a good reclaim candidate,
742  * for example if its invalidation fails due to the page being dirty
743  * or under writeback.
744  */
745 void deactivate_page(struct page *page)
746 {
747         /*
748          * In a workload with many unevictable pages, such as one using mprotect,
749          * deactivating unevictable pages to accelerate reclaim is pointless.
750          */
751         if (PageUnevictable(page))
752                 return;
753
754         if (likely(get_page_unless_zero(page))) {
755                 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
756
757                 if (!pagevec_add(pvec, page))
758                         pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
759                 put_cpu_var(lru_deactivate_pvecs);
760         }
761 }
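/*
 * Illustrative sketch (hedged; the actual callers are outside this file):
 * invalidation paths that could not drop a page may hint reclaim instead,
 * roughly:
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_page(page);	(dirty/writeback page: reclaim it soon)
 */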
762
763 void lru_add_drain(void)
764 {
765         lru_add_drain_cpu(get_cpu());
766         put_cpu();
767 }
768
769 static void lru_add_drain_per_cpu(struct work_struct *dummy)
770 {
771         lru_add_drain();
772 }
773
774 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
775
776 void lru_add_drain_all(void)
777 {
778         static DEFINE_MUTEX(lock);
779         static struct cpumask has_work;
780         int cpu;
781
782         mutex_lock(&lock);
783         get_online_cpus();
784         cpumask_clear(&has_work);
785
786         for_each_online_cpu(cpu) {
787                 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
788
789                 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
790                     pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
791                     pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
792                     need_activate_page_drain(cpu)) {
793                         INIT_WORK(work, lru_add_drain_per_cpu);
794                         schedule_work_on(cpu, work);
795                         cpumask_set_cpu(cpu, &has_work);
796                 }
797         }
798
799         for_each_cpu(cpu, &has_work)
800                 flush_work(&per_cpu(lru_add_drain_work, cpu));
801
802         put_online_cpus();
803         mutex_unlock(&lock);
804 }
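/*
 * Descriptive note (hedged): lru_add_drain_all() is the heavyweight variant
 * used by callers elsewhere in mm (e.g. the mlock and page migration paths)
 * that need every CPU's pending pagevecs flushed before proceeding.
 */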
805
806 /*
807  * Batched page_cache_release().  Decrement the reference count on all the
808  * passed pages.  If it fell to zero then remove the page from the LRU and
809  * free it.
810  *
811  * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
812  * for the remainder of the operation.
813  *
814  * The locking in this function is against shrink_inactive_list(): we recheck
815  * the page count inside the lock to see whether shrink_inactive_list()
816  * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
817  * will free it.
818  */
819 void release_pages(struct page **pages, int nr, int cold)
820 {
821         int i;
822         LIST_HEAD(pages_to_free);
823         struct zone *zone = NULL;
824         struct lruvec *lruvec;
825         unsigned long uninitialized_var(flags);
826
827         for (i = 0; i < nr; i++) {
828                 struct page *page = pages[i];
829
830                 if (unlikely(PageCompound(page))) {
831                         if (zone) {
832                                 spin_unlock_irqrestore(&zone->lru_lock, flags);
833                                 zone = NULL;
834                         }
835                         put_compound_page(page);
836                         continue;
837                 }
838
839                 if (!put_page_testzero(page))
840                         continue;
841
842                 if (PageLRU(page)) {
843                         struct zone *pagezone = page_zone(page);
844
845                         if (pagezone != zone) {
846                                 if (zone)
847                                         spin_unlock_irqrestore(&zone->lru_lock,
848                                                                         flags);
849                                 zone = pagezone;
850                                 spin_lock_irqsave(&zone->lru_lock, flags);
851                         }
852
853                         lruvec = mem_cgroup_page_lruvec(page, zone);
854                         VM_BUG_ON(!PageLRU(page));
855                         __ClearPageLRU(page);
856                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
857                 }
858
859                 /* Clear Active bit in case of parallel mark_page_accessed */
860                 ClearPageActive(page);
861
862                 list_add(&page->lru, &pages_to_free);
863         }
864         if (zone)
865                 spin_unlock_irqrestore(&zone->lru_lock, flags);
866
867         free_hot_cold_page_list(&pages_to_free, cold);
868 }
869 EXPORT_SYMBOL(release_pages);
870
871 /*
872  * The pages which we're about to release may be in the deferred lru-addition
873  * queues.  That would prevent them from really being freed right now.  That's
874  * OK from a correctness point of view but is inefficient - those pages may be
875  * cache-warm and we want to give them back to the page allocator ASAP.
876  *
877  * So __pagevec_release() will drain those queues here.
878  * __pagevec_lru_add() calls release_pages() directly to avoid mutual
879  * recursion.
880  */
881 void __pagevec_release(struct pagevec *pvec)
882 {
883         lru_add_drain();
884         release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
885         pagevec_reinit(pvec);
886 }
887 EXPORT_SYMBOL(__pagevec_release);
888
889 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
890 /* used by __split_huge_page_refcount() */
891 void lru_add_page_tail(struct page *page, struct page *page_tail,
892                        struct lruvec *lruvec, struct list_head *list)
893 {
894         const int file = 0;
895
896         VM_BUG_ON(!PageHead(page));
897         VM_BUG_ON(PageCompound(page_tail));
898         VM_BUG_ON(PageLRU(page_tail));
899         VM_BUG_ON(NR_CPUS != 1 &&
900                   !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
901
902         if (!list)
903                 SetPageLRU(page_tail);
904
905         if (likely(PageLRU(page)))
906                 list_add_tail(&page_tail->lru, &page->lru);
907         else if (list) {
908                 /* page reclaim is reclaiming a huge page */
909                 get_page(page_tail);
910                 list_add_tail(&page_tail->lru, list);
911         } else {
912                 struct list_head *list_head;
913                 /*
914                  * The head page has not yet been counted as an hpage,
915                  * so we must account for each subpage individually.
916                  *
917                  * Use the standard add function to put page_tail on the list,
918                  * but then correct its position so they all end up in order.
919                  */
920                 add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
921                 list_head = page_tail->lru.prev;
922                 list_move_tail(&page_tail->lru, list_head);
923         }
924
925         if (!PageUnevictable(page))
926                 update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
927 }
928 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
929
930 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
931                                  void *arg)
932 {
933         int file = page_is_file_cache(page);
934         int active = PageActive(page);
935         enum lru_list lru = page_lru(page);
936
937         VM_BUG_ON(PageLRU(page));
938
939         SetPageLRU(page);
940         add_page_to_lru_list(page, lruvec, lru);
941         update_page_reclaim_stat(lruvec, file, active);
942         trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
943 }
944
945 /*
946  * Add the passed pages to the LRU, then drop the caller's refcount
947  * on them.  Reinitialises the caller's pagevec.
948  */
949 void __pagevec_lru_add(struct pagevec *pvec)
950 {
951         pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
952 }
953 EXPORT_SYMBOL(__pagevec_lru_add);
954
955 /**
956  * pagevec_lookup - gang pagecache lookup
957  * @pvec:       Where the resulting pages are placed
958  * @mapping:    The address_space to search
959  * @start:      The starting page index
960  * @nr_pages:   The maximum number of pages
961  *
962  * pagevec_lookup() will search for and return a group of up to @nr_pages pages
963  * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
964  * reference against the pages in @pvec.
965  *
966  * The search returns a group of mapping-contiguous pages with ascending
967  * indices.  There may be holes in the indices due to not-present pages.
968  *
969  * pagevec_lookup() returns the number of pages which were found.
970  */
971 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
972                 pgoff_t start, unsigned nr_pages)
973 {
974         pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
975         return pagevec_count(pvec);
976 }
977 EXPORT_SYMBOL(pagevec_lookup);
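/*
 * Illustrative sketch (hypothetical caller): scanning a mapping in batches,
 * dropping the gang reference after each batch.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			... inspect page ...
 *			index = page->index + 1;
 *		}
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */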
978
979 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
980                 pgoff_t *index, int tag, unsigned nr_pages)
981 {
982         pvec->nr = find_get_pages_tag(mapping, index, tag,
983                                         nr_pages, pvec->pages);
984         return pagevec_count(pvec);
985 }
986 EXPORT_SYMBOL(pagevec_lookup_tag);
987
988 /*
989  * Perform any setup for the swap system
990  */
991 void __init swap_setup(void)
992 {
993         unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
994 #ifdef CONFIG_SWAP
995         int i;
996
997         if (bdi_init(swapper_spaces[0].backing_dev_info))
998                 panic("Failed to init swap bdi");
999         for (i = 0; i < MAX_SWAPFILES; i++) {
1000                 spin_lock_init(&swapper_spaces[i].tree_lock);
1001                 INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
1002         }
1003 #endif
1004
1005         /* Use a smaller cluster for small-memory machines */
1006         if (megs < 16)
1007                 page_cluster = 2;
1008         else
1009                 page_cluster = 3;
1010         /*
1011          * Right now, other parts of the system mean that we
1012          * _really_ don't want to cluster much more.
1013          */
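        /*
         * Note (assumption, based on how the swap readahead code consumes
         * this value rather than on anything in this file): page_cluster is
         * treated as a power-of-two exponent, so 2 and 3 above correspond
         * to clusters of 4 and 8 pages respectively.
         */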
1014 }