/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered
 * into the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space, they need to be kmapped separately for doing I/O on the pages.  The
 * struct page itself (which holds these flag bits) is always mapped into
 * kernel address space.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
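
/*
 * For example (informational; the accessors live in linux/mm.h, not here):
 * the zone index is read back out of the upper "fields" part roughly as
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *
 * while the PG_* bits declared below occupy the low NR_PAGEFLAGS bits.
 */
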
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	PG_head,		/* A head page */
	PG_tail,		/* A tail page */
#else
	PG_compound,		/* A compound page */
#endif
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* Xen */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,
};
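
/*
 * Note on the aliases above (informational): PG_checked, PG_pinned and
 * PG_foreign all name the same bit as PG_owner_priv_1, and PG_savepinned
 * shares its bit with PG_dirty, so a page can only carry one of the aliased
 * meanings at a time; PageChecked() and PagePinned(), for instance, test
 * the very same bit.
 */
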
#ifndef __GENERATING_BOUNDS_H

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname) \
static inline int Page##uname(const struct page *page) \
	{ return test_bit(PG_##lname, &page->flags); }

#define SETPAGEFLAG(uname, lname) \
static inline void SetPage##uname(struct page *page) \
	{ set_bit(PG_##lname, &page->flags); }

#define CLEARPAGEFLAG(uname, lname) \
static inline void ClearPage##uname(struct page *page) \
	{ clear_bit(PG_##lname, &page->flags); }

#define __SETPAGEFLAG(uname, lname) \
static inline void __SetPage##uname(struct page *page) \
	{ __set_bit(PG_##lname, &page->flags); }

#define __CLEARPAGEFLAG(uname, lname) \
static inline void __ClearPage##uname(struct page *page) \
	{ __clear_bit(PG_##lname, &page->flags); }

#define TESTSETFLAG(uname, lname) \
static inline int TestSetPage##uname(struct page *page) \
	{ return test_and_set_bit(PG_##lname, &page->flags); }

#define TESTCLEARFLAG(uname, lname) \
static inline int TestClearPage##uname(struct page *page) \
	{ return test_and_clear_bit(PG_##lname, &page->flags); }

#define __TESTCLEARFLAG(uname, lname) \
static inline int __TestClearPage##uname(struct page *page) \
	{ return __test_and_clear_bit(PG_##lname, &page->flags); }

#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)

#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

#define TESTSCFLAG(uname, lname) \
	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)

#define TESTPAGEFLAG_FALSE(uname) \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname) \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname) \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname) \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define __TESTCLEARFLAG_FALSE(uname) \
static inline int __TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname) \
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
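
/*
 * For illustration (this adds no API of its own): PAGEFLAG(Dirty, dirty)
 * together with TESTSCFLAG(Dirty, dirty) expands to
 *
 *	static inline int PageDirty(const struct page *page)
 *		{ return test_bit(PG_dirty, &page->flags); }
 *	static inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &page->flags); }
 *	static inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &page->flags); }
 *	static inline int TestSetPageDirty(struct page *page)
 *		{ return test_and_set_bit(PG_dirty, &page->flags); }
 *	static inline int TestClearPageDirty(struct page *page)
 *		{ return test_and_clear_bit(PG_dirty, &page->flags); }
 *
 * The __SETPAGEFLAG and __CLEARPAGEFLAG variants generate the non-atomic
 * __SetPage and __ClearPage forms, and the FALSE/NOOP variants provide
 * stubs for configurations where the flag does not exist.
 */
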
struct page;	/* forward declaration */

TESTPAGEFLAG(Locked, locked)
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
	__SETPAGEFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
	TESTCLEARFLAG(Active, active)
__PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked)		/* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
PAGEFLAG(SavePinned, savepinned);			/* Xen */
PAGEFLAG(Foreign, foreign);				/* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
	__SETPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)
/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
	__CLEARPAGEFLAG(Private, private)
PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem(page_zone(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache)
#else
PAGEFLAG_FALSE(SwapCache)
#endif
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
	TESTCLEARFLAG(Unevictable, unevictable)
#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached)
#else
PAGEFLAG_FALSE(Uncached)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)
TESTSCFLAG(HWPoison, hwpoison)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young)
SETPAGEFLAG(Young, young)
TESTCLEARFLAG(Young, young)
#endif
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
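
/*
 * Illustrative sketch (not a definition from this header): because the low
 * two bits of page->mapping are used as PAGE_MAPPING_* tags, code that needs
 * the underlying pointer masks them off first, roughly:
 *
 *	void *p = (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 *
 * For a PageAnon() page that yields the anon_vma (or, with PAGE_MAPPING_KSM
 * also set, KSM's private stable-tree structure); for a pagecache page it is
 * the struct address_space itself, whose low bits are zero by alignment.
 */
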
#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	smp_wmb();
	__set_bit(PG_uptodate, &(page)->flags);
}

static inline void SetPageUptodate(struct page *page)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &(page)->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate)
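
/*
 * Illustrative pairing of the barriers above (a sketch, not code from this
 * header; copy_data_into_page and use_data_from_page are hypothetical
 * helpers):
 *
 *	// writer, e.g. at the end of a read that filled the page
 *	copy_data_into_page(page);
 *	SetPageUptodate(page);		// smp_wmb(), then set the bit
 *
 *	// reader
 *	if (PageUptodate(page))		// test the bit, then smp_rmb()
 *		use_data_from_page(page);
 *
 * The write barrier publishes the page contents before the bit becomes
 * visible, and the read barrier keeps the data reads from being reordered
 * before the bit test.
 */
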
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}
#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
 * System with lots of page flags available. This allows separate
 * flags for PageHead() and PageTail() checks of compound pages so that bit
 * tests can be used in performance sensitive paths. PageCompound is
 * generally not used in hot code paths except arch/powerpc/mm/init_64.c
 * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
 * and avoid handling those in real mode.
 */
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
__PAGEFLAG(Tail, tail)

static inline int PageCompound(struct page *page)
{
	return page->flags & ((1L << PG_head) | (1L << PG_tail));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1L << PG_head))
#else
/*
 * Reduce page flag use as much as possible by overlapping
 * compound page flags with the flags used for page cache pages. Possible
 * because PageCompound is always set for compound pages and not for
 * pages on the LRU and/or pagecache.
 */
TESTPAGEFLAG(Compound, compound)
__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)

/*
 * PG_reclaim is used in combination with PG_compound to mark the
 * head and tail of a compound page. This saves one page flag
 * but makes it impossible to use compound pages for the page cache.
 * The PG_reclaim bit would have to be used for reclaim or readahead
 * if compound pages enter the page cache.
 *
 * PG_compound & PG_reclaim	=> Tail page
 * PG_compound & ~PG_reclaim	=> Head page
 */
#define PG_head_mask ((1L << PG_compound))
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))

static inline int PageHead(struct page *page)
{
	return ((page->flags & PG_head_tail_mask) == PG_head_mask);
}

static inline int PageTail(struct page *page)
{
	return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
}

static inline void __SetPageTail(struct page *page)
{
	page->flags |= PG_head_tail_mask;
}

static inline void __ClearPageTail(struct page *page)
{
	page->flags &= ~PG_head_tail_mask;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
	clear_bit(PG_compound, &page->flags);
}
#endif

#endif /* !PAGEFLAGS_EXTENDED */
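
/*
 * Quick orientation (informational): for an order-N compound page the first
 * struct page is the head (PageHead() is true) and the remaining 2^N - 1
 * struct pages are tails (PageTail() is true); PageCompound() is true for
 * all of them under either encoding above.  Code holding an arbitrary
 * member page can get back to the head with compound_head(page), which is
 * defined in linux/mm.h rather than here.
 */
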
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() should only be
 * called in core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

#else

static inline int PageTransHuge(struct page *page) { return 0; }
static inline int PageTransCompound(struct page *page) { return 0; }
static inline int PageTransTail(struct page *page) { return 0; }

#endif
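
/*
 * Rough summary of the helpers above (informational): PageHuge() is true
 * only for hugetlbfs pages; PageTransCompound() and PageTransTail() are
 * true for any compound page, whether THP or hugetlbfs; PageTransHuge()
 * is true for a compound head of either kind and must never be handed a
 * tail page.
 */
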
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}
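
/*
 * Informational sketch: both markers above live in page->_mapcount, which
 * holds -1 for an ordinary page with no user mappings, so they are mutually
 * exclusive with the page being mapped:
 *
 *	_mapcount == -1		ordinary page, not mapped anywhere
 *	_mapcount == -128	free page owned by the buddy allocator
 *	_mapcount == -256	page owned by a memory balloon driver
 *	_mapcount >= 0		mapped; page_mapcount() == _mapcount + 1
 */
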
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}
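
/*
 * Usage sketch (an approximation of what the slab allocators do, not an API
 * defined here): a slab page allocated from pfmemalloc reserves is marked
 * with SetPageSlabPfmemalloc(page), and object allocation later checks
 * PageSlabPfmemalloc(page) so that such objects are handed out only to
 * pfmemalloc-entitled (PF_MEMALLOC) callers.
 */
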
#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1 << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __PG_COMPOUND_LOCK	(1 << PG_compound_lock)
#else
#define __PG_COMPOUND_LOCK	0
#endif
/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru	 | 1 << PG_locked    | \
	 1 << PG_private | 1 << PG_private_2 | \
	 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
	 1 << PG_unevictable | __PG_MLOCKED | \
	 __PG_COMPOUND_LOCK)
/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
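
/*
 * Usage sketch (an approximation of mm/page_alloc.c, not an API defined
 * here): the free path treats any PAGE_FLAGS_CHECK_AT_FREE bit as a bug,
 * and the allocation path does the same for PAGE_FLAGS_CHECK_AT_PREP,
 * roughly:
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page, ...);	// report and refuse the free
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP))
 *		...;			// freshly allocated page is corrupt
 */
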
#define PAGE_FLAGS_PRIVATE				\
	(1 << PG_private | 1 << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
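
/*
 * Example of intended use (a sketch; real callers live in mm/ and fs/):
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		...;	// the filesystem would not let go of its private data
 */
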
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */