#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_page_pmd_mm(walk->mm, addr, pmd);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

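/*
 * Illustrative sketch, not part of the original file: because
 * walk_pmd_range() invokes ->pmd_entry() before any splitting happens, a
 * pmd_entry handler must be prepared to see a transparent huge page pmd,
 * as the comment above notes. The function name and the counter kept in
 * walk->private are hypothetical; a real handler would also take the
 * appropriate page table lock before inspecting the pmd.
 */
#if 0
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *thp_count = walk->private;

        /* A trans-huge pmd maps the whole [addr, next) range with one entry. */
        if (pmd_trans_huge(*pmd))
                (*thp_count)++;

        /* Returning 0 lets the walk descend to (or skip) the pte level. */
        return 0;
}
#endif
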
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

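/*
 * Illustrative sketch, not part of the original file: a ->hugetlb_entry()
 * callback, called with the huge page mask so it knows how much virtual
 * address space one entry covers. Everything named example_* below, and
 * the resident-size accounting, is hypothetical.
 */
#if 0
static int example_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long next,
                                 struct mm_walk *walk)
{
        unsigned long *resident = walk->private;

        /* One present entry accounts for a whole huge page. */
        if (pte_present(huge_ptep_get(pte)))
                *resident += (~hmask) + 1;

        return 0;
}
#endif
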
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, in which case we abort the current walk.
 *
 * The default check (only the VM_PFNMAP check for now) is used when the
 * caller doesn't define a test_walk() callback.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                        struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * Do not walk over vma(VM_PFNMAP), because we have no valid struct
         * page backing a VM_PFNMAP range. See also commit a9ff785e4437.
         */
        if (vma->vm_flags & VM_PFNMAP)
                return 1;
        return 0;
}

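/*
 * Illustrative sketch, not part of the original file: a ->test_walk()
 * callback that mirrors the default policy above but also skips mlocked
 * vmas. The function name and the VM_LOCKED policy are made up for the
 * example; the 0 / 1 / negative return convention is the one documented
 * for walk_page_test().
 */
#if 0
static int example_test_walk(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        /* Skip vmas we are not interested in; the walk itself continues. */
        if (vma->vm_flags & (VM_PFNMAP | VM_LOCKED))
                return 1;

        /* Walk this vma normally. */
        return 0;
}
#endif
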
static int __walk_page_range(unsigned long start, unsigned long end,
                        struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *  - 0  : succeeded in handling the current entry; if the end address has
 *         not been reached yet, continue to walk.
 *  - >0 : succeeded in handling the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with an error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to the callbacks, @walk->private should be helpful.
 * (An illustrative usage sketch appears at the end of this file.)
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access vma data.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0)
                                continue;
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}

int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);
        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}

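/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * walk_page_range(), wired up the way the kernel-doc above describes. The
 * callback, the helper and the "present page" counting are hypothetical;
 * the mm_walk fields used here (pte_entry, mm, private) and the mmap_sem
 * locking requirement are the ones documented above.
 */
#if 0
static int example_pte_entry(pte_t *pte, unsigned long addr,
                             unsigned long end, struct mm_walk *walk)
{
        unsigned long *present = walk->private;

        if (pte_present(*pte))
                (*present)++;

        /* 0: keep walking until the end address is reached. */
        return 0;
}

static unsigned long example_count_present(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        unsigned long present = 0;
        struct mm_walk walk = {
                .pte_entry      = example_pte_entry,
                .mm             = mm,
                .private        = &present,
        };

        /* Callers must hold mmap_sem across the walk. */
        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);

        return present;
}
#endif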