/*
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h = hstate_vma(vma);

	while (1) {
		unsigned char present;
		pte_t *ptep;

		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically they need to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		/* Fill one entry per base page up to the next hugepage border. */
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}
/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp), swp.val);
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}
static void mincore_unmapped_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}
static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(swap_address_space(entry),
						    pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}
static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}
static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}
static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}
/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}
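/*
 * Illustrative note (not in the original source): do_mincore() only ever
 * covers a single vma per call.  If, say, 8 pages are requested but the
 * vma containing "addr" has only 3 pages left before vm_end, it fills
 * vec[0..2] and returns 3; sys_mincore() below then advances addr and vec
 * and calls again for the remaining pages.
 */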
/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *	      invalid for the address space of this process, or
 *	      specify one or more pages which are not currently
 *	      mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
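/*
 * Illustrative userspace sketch (not part of the original source): one
 * way an application might call mincore(2) on a mapped file.  The file
 * name "data.bin" and the 16-page length are made-up example values.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 16 * page;
 *		int fd = open("data.bin", O_RDONLY);
 *		void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *		size_t pages = (len + page - 1) / page;
 *		unsigned char *vec = malloc(pages);
 *
 *		if (p != MAP_FAILED && vec && mincore(p, len, vec) == 0) {
 *			for (size_t i = 0; i < pages; i++)
 *				printf("page %zu: %s\n", i,
 *				       (vec[i] & 1) ? "resident" : "not resident");
 *		}
 *		free(vec);
 *		return 0;
 *	}
 *
 * Only the least significant bit of each vec entry is meaningful; the
 * remaining bits are reserved, hence the (vec[i] & 1) test.
 */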
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}