2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
22 * bind Only allocate memory on a specific set of nodes,
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
57 fix mmap readahead to honour policy and enable policy for any page cache
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
62 handle mremap for shared memory (currently ignored for the policy)
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
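/*
 * Illustrative user-space use of the policies described above (a sketch,
 * not part of this file; assumes the set_mempolicy()/mbind() wrappers and
 * MPOL_* constants exported by libnuma's <numaif.h>):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *		unsigned long node0 = 1UL << 0;
 *		void *p;
 *
 *		// process policy: interleave future allocations over nodes 0-1
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01)))
 *			perror("set_mempolicy");
 *
 *		// VMA policy: restrict one mapping to node 0
 *		p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0))
 *			perror("mbind");
 *		return 0;
 *	}
 */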
68 #include <linux/mempolicy.h>
70 #include <linux/highmem.h>
71 #include <linux/hugetlb.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/nodemask.h>
75 #include <linux/cpuset.h>
76 #include <linux/gfp.h>
77 #include <linux/slab.h>
78 #include <linux/string.h>
79 #include <linux/module.h>
80 #include <linux/nsproxy.h>
81 #include <linux/interrupt.h>
82 #include <linux/init.h>
83 #include <linux/compat.h>
84 #include <linux/swap.h>
85 #include <linux/seq_file.h>
86 #include <linux/proc_fs.h>
87 #include <linux/migrate.h>
88 #include <linux/rmap.h>
89 #include <linux/security.h>
90 #include <linux/syscalls.h>
91 #include <linux/ctype.h>
92 #include <linux/mm_inline.h>
94 #include <asm/tlbflush.h>
95 #include <asm/uaccess.h>
100 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for contiguous VMAs */
101 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
102 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
104 static struct kmem_cache *policy_cache;
105 static struct kmem_cache *sn_cache;
107 /* Highest zone. A specific allocation for a zone below that is not
109 enum zone_type policy_zone = 0;
112 * run-time system-wide default policy => local allocation
114 struct mempolicy default_policy = {
115 .refcnt = ATOMIC_INIT(1), /* never free it */
116 .mode = MPOL_PREFERRED,
117 .flags = MPOL_F_LOCAL,
120 static const struct mempolicy_operations {
121 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
122 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
123 } mpol_ops[MPOL_MAX];
125 /* Check that the nodemask contains at least one populated zone */
126 static int is_valid_nodemask(const nodemask_t *nodemask)
130 /* Check that there is something useful in this mask */
133 for_each_node_mask(nd, *nodemask) {
136 for (k = 0; k <= policy_zone; k++) {
137 z = &NODE_DATA(nd)->node_zones[k];
138 if (z->present_pages > 0)
146 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
148 return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
151 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
152 const nodemask_t *rel)
155 nodes_fold(tmp, *orig, nodes_weight(*rel));
156 nodes_onto(*ret, tmp, *rel);
159 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
161 if (nodes_empty(*nodes))
163 pol->v.nodes = *nodes;
167 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
170 pol->flags |= MPOL_F_LOCAL; /* local allocation */
171 else if (nodes_empty(*nodes))
172 return -EINVAL; /* no allowed nodes */
174 pol->v.preferred_node = first_node(*nodes);
178 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
180 if (!is_valid_nodemask(nodes))
182 pol->v.nodes = *nodes;
187 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
188 * any, for the new policy. mpol_new() has already validated the nodes
189 * parameter with respect to the policy mode and flags. But, we need to
190 * handle an empty nodemask with MPOL_PREFERRED here.
192 * Must be called holding task's alloc_lock to protect task's mems_allowed
193 * and mempolicy. May also be called holding the mmap_semaphore for write.
195 static int mpol_set_nodemask(struct mempolicy *pol,
196 const nodemask_t *nodes, struct nodemask_scratch *nsc)
200 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
203 /* Check N_HIGH_MEMORY */
204 nodes_and(nsc->mask1,
205 cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
208 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
209 nodes = NULL; /* explicit local allocation */
211 if (pol->flags & MPOL_F_RELATIVE_NODES)
212 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
214 nodes_and(nsc->mask2, *nodes, nsc->mask1);
216 if (mpol_store_user_nodemask(pol))
217 pol->w.user_nodemask = *nodes;
219 pol->w.cpuset_mems_allowed =
220 cpuset_current_mems_allowed;
224 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
226 ret = mpol_ops[pol->mode].create(pol, NULL);
231 * This function just creates a new policy, does some checks and simple
232 * initialization. You must invoke mpol_set_nodemask() to set nodes.
234 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
237 struct mempolicy *policy;
239 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
240 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
242 if (mode == MPOL_DEFAULT) {
243 if (nodes && !nodes_empty(*nodes))
244 return ERR_PTR(-EINVAL);
245 return NULL; /* simply delete any existing policy */
250 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
251 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
252 * All other modes require a valid pointer to a non-empty nodemask.
254 if (mode == MPOL_PREFERRED) {
255 if (nodes_empty(*nodes)) {
256 if (((flags & MPOL_F_STATIC_NODES) ||
257 (flags & MPOL_F_RELATIVE_NODES)))
258 return ERR_PTR(-EINVAL);
260 } else if (nodes_empty(*nodes))
261 return ERR_PTR(-EINVAL);
262 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
264 return ERR_PTR(-ENOMEM);
265 atomic_set(&policy->refcnt, 1);
267 policy->flags = flags;
272 /* Slow path of a mpol destructor. */
273 void __mpol_put(struct mempolicy *p)
275 if (!atomic_dec_and_test(&p->refcnt))
277 kmem_cache_free(policy_cache, p);
280 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
284 static void mpol_rebind_nodemask(struct mempolicy *pol,
285 const nodemask_t *nodes)
289 if (pol->flags & MPOL_F_STATIC_NODES)
290 nodes_and(tmp, pol->w.user_nodemask, *nodes);
291 else if (pol->flags & MPOL_F_RELATIVE_NODES)
292 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
294 nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
296 pol->w.cpuset_mems_allowed = *nodes;
300 if (!node_isset(current->il_next, tmp)) {
301 current->il_next = next_node(current->il_next, tmp);
302 if (current->il_next >= MAX_NUMNODES)
303 current->il_next = first_node(tmp);
304 if (current->il_next >= MAX_NUMNODES)
305 current->il_next = numa_node_id();
309 static void mpol_rebind_preferred(struct mempolicy *pol,
310 const nodemask_t *nodes)
314 if (pol->flags & MPOL_F_STATIC_NODES) {
315 int node = first_node(pol->w.user_nodemask);
317 if (node_isset(node, *nodes)) {
318 pol->v.preferred_node = node;
319 pol->flags &= ~MPOL_F_LOCAL;
321 pol->flags |= MPOL_F_LOCAL;
322 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
323 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
324 pol->v.preferred_node = first_node(tmp);
325 } else if (!(pol->flags & MPOL_F_LOCAL)) {
326 pol->v.preferred_node = node_remap(pol->v.preferred_node,
327 pol->w.cpuset_mems_allowed,
329 pol->w.cpuset_mems_allowed = *nodes;
333 /* Migrate a policy to a different set of nodes */
334 static void mpol_rebind_policy(struct mempolicy *pol,
335 const nodemask_t *newmask)
339 if (!mpol_store_user_nodemask(pol) &&
340 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
342 mpol_ops[pol->mode].rebind(pol, newmask);
346 * Wrapper for mpol_rebind_policy() that just requires task
347 * pointer, and updates task mempolicy.
349 * Called with task's alloc_lock held.
352 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
354 mpol_rebind_policy(tsk->mempolicy, new);
358 * Rebind each vma in mm to new nodemask.
360 * Call holding a reference to mm. Takes mm->mmap_sem during call.
363 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
365 struct vm_area_struct *vma;
367 down_write(&mm->mmap_sem);
368 for (vma = mm->mmap; vma; vma = vma->vm_next)
369 mpol_rebind_policy(vma->vm_policy, new);
370 up_write(&mm->mmap_sem);
373 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
375 .rebind = mpol_rebind_default,
377 [MPOL_INTERLEAVE] = {
378 .create = mpol_new_interleave,
379 .rebind = mpol_rebind_nodemask,
382 .create = mpol_new_preferred,
383 .rebind = mpol_rebind_preferred,
386 .create = mpol_new_bind,
387 .rebind = mpol_rebind_nodemask,
391 static void gather_stats(struct page *, void *, int pte_dirty);
392 static void migrate_page_add(struct page *page, struct list_head *pagelist,
393 unsigned long flags);
395 /* Scan through the pages, checking whether they satisfy the given conditions. */
396 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
397 unsigned long addr, unsigned long end,
398 const nodemask_t *nodes, unsigned long flags,
405 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
410 if (!pte_present(*pte))
412 page = vm_normal_page(vma, addr, *pte);
416 * The check for PageReserved here is important to avoid
417 * handling zero pages and other pages that may have been
418 * marked special by the system.
420 * If PageReserved were not checked here then e.g.
421 * the location of the zero page could have an influence
422 * on MPOL_MF_STRICT, zero pages would be counted for
423 * the per node stats, and there would be useless attempts
424 * to put zero pages on the migration list.
426 if (PageReserved(page))
428 nid = page_to_nid(page);
429 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
432 if (flags & MPOL_MF_STATS)
433 gather_stats(page, private, pte_dirty(*pte));
434 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
435 migrate_page_add(page, private, flags);
438 } while (pte++, addr += PAGE_SIZE, addr != end);
439 pte_unmap_unlock(orig_pte, ptl);
443 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
444 unsigned long addr, unsigned long end,
445 const nodemask_t *nodes, unsigned long flags,
451 pmd = pmd_offset(pud, addr);
453 next = pmd_addr_end(addr, end);
454 if (pmd_none_or_clear_bad(pmd))
456 if (check_pte_range(vma, pmd, addr, next, nodes,
459 } while (pmd++, addr = next, addr != end);
463 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
464 unsigned long addr, unsigned long end,
465 const nodemask_t *nodes, unsigned long flags,
471 pud = pud_offset(pgd, addr);
473 next = pud_addr_end(addr, end);
474 if (pud_none_or_clear_bad(pud))
476 if (check_pmd_range(vma, pud, addr, next, nodes,
479 } while (pud++, addr = next, addr != end);
483 static inline int check_pgd_range(struct vm_area_struct *vma,
484 unsigned long addr, unsigned long end,
485 const nodemask_t *nodes, unsigned long flags,
491 pgd = pgd_offset(vma->vm_mm, addr);
493 next = pgd_addr_end(addr, end);
494 if (pgd_none_or_clear_bad(pgd))
496 if (check_pud_range(vma, pgd, addr, next, nodes,
499 } while (pgd++, addr = next, addr != end);
504 * Check if all pages in a range are on a set of nodes.
505 * If pagelist != NULL then isolate pages from the LRU and
506 * put them on the pagelist.
508 static struct vm_area_struct *
509 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
510 const nodemask_t *nodes, unsigned long flags, void *private)
513 struct vm_area_struct *first, *vma, *prev;
516 first = find_vma(mm, start);
518 return ERR_PTR(-EFAULT);
520 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
521 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
522 if (!vma->vm_next && vma->vm_end < end)
523 return ERR_PTR(-EFAULT);
524 if (prev && prev->vm_end < vma->vm_start)
525 return ERR_PTR(-EFAULT);
527 if (!is_vm_hugetlb_page(vma) &&
528 ((flags & MPOL_MF_STRICT) ||
529 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
530 vma_migratable(vma)))) {
531 unsigned long endvma = vma->vm_end;
535 if (vma->vm_start > start)
536 start = vma->vm_start;
537 err = check_pgd_range(vma, start, endvma, nodes,
540 first = ERR_PTR(err);
549 /* Apply policy to a single VMA */
550 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
553 struct mempolicy *old = vma->vm_policy;
555 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
556 vma->vm_start, vma->vm_end, vma->vm_pgoff,
557 vma->vm_ops, vma->vm_file,
558 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
560 if (vma->vm_ops && vma->vm_ops->set_policy)
561 err = vma->vm_ops->set_policy(vma, new);
564 vma->vm_policy = new;
570 /* Step 2: apply policy to a range and do splits. */
571 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
572 unsigned long end, struct mempolicy *new)
574 struct vm_area_struct *next;
578 for (; vma && vma->vm_start < end; vma = next) {
580 if (vma->vm_start < start)
581 err = split_vma(vma->vm_mm, vma, start, 1);
582 if (!err && vma->vm_end > end)
583 err = split_vma(vma->vm_mm, vma, end, 0);
585 err = policy_vma(vma, new);
593 * Update task->flags PF_MEMPOLICY bit: set iff non-default
594 * mempolicy. Allows more rapid checking of this (combined perhaps
595 * with other PF_* flag bits) on memory allocation hot code paths.
597 * If called from outside this file, the task 'p' should -only- be
598 * a newly forked child not yet visible on the task list, because
599 * manipulating the task flags of a visible task is not safe.
601 * The above limitation is why this routine has the funny name
602 * mpol_fix_fork_child_flag().
604 * It is also safe to call this with a task pointer of current,
605 * which the static wrapper mpol_set_task_struct_flag() does,
606 * for use within this file.
609 void mpol_fix_fork_child_flag(struct task_struct *p)
612 p->flags |= PF_MEMPOLICY;
614 p->flags &= ~PF_MEMPOLICY;
617 static void mpol_set_task_struct_flag(void)
619 mpol_fix_fork_child_flag(current);
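/*
 * Illustrative call site (a sketch, not from this file): copy_process() in
 * kernel/fork.c is expected to call this on the new child before the child
 * becomes visible on the task list:
 *
 *	mpol_fix_fork_child_flag(p);
 */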
622 /* Set the process memory policy */
623 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
626 struct mempolicy *new, *old;
627 struct mm_struct *mm = current->mm;
628 NODEMASK_SCRATCH(scratch);
634 new = mpol_new(mode, flags, nodes);
640 * prevent changing our mempolicy while show_numa_maps()
642 * Note: do_set_mempolicy() can be called at init time
646 down_write(&mm->mmap_sem);
648 ret = mpol_set_nodemask(new, nodes, scratch);
650 task_unlock(current);
652 up_write(&mm->mmap_sem);
656 old = current->mempolicy;
657 current->mempolicy = new;
658 mpol_set_task_struct_flag();
659 if (new && new->mode == MPOL_INTERLEAVE &&
660 nodes_weight(new->v.nodes))
661 current->il_next = first_node(new->v.nodes);
662 task_unlock(current);
664 up_write(&mm->mmap_sem);
669 NODEMASK_SCRATCH_FREE(scratch);
674 * Return nodemask for policy for get_mempolicy() query
676 * Called with task's alloc_lock held
678 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
681 if (p == &default_policy)
687 case MPOL_INTERLEAVE:
691 if (!(p->flags & MPOL_F_LOCAL))
692 node_set(p->v.preferred_node, *nodes);
693 /* else return empty node mask for local allocation */
700 static int lookup_node(struct mm_struct *mm, unsigned long addr)
705 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
707 err = page_to_nid(p);
713 /* Retrieve NUMA policy */
714 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
715 unsigned long addr, unsigned long flags)
718 struct mm_struct *mm = current->mm;
719 struct vm_area_struct *vma = NULL;
720 struct mempolicy *pol = current->mempolicy;
723 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
726 if (flags & MPOL_F_MEMS_ALLOWED) {
727 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
729 *policy = 0; /* just so it's initialized */
731 *nmask = cpuset_current_mems_allowed;
732 task_unlock(current);
736 if (flags & MPOL_F_ADDR) {
738 * Do NOT fall back to task policy if the
739 * vma/shared policy at addr is NULL. We
740 * want to return MPOL_DEFAULT in this case.
742 down_read(&mm->mmap_sem);
743 vma = find_vma_intersection(mm, addr, addr+1);
745 up_read(&mm->mmap_sem);
748 if (vma->vm_ops && vma->vm_ops->get_policy)
749 pol = vma->vm_ops->get_policy(vma, addr);
751 pol = vma->vm_policy;
756 pol = &default_policy; /* indicates default behavior */
758 if (flags & MPOL_F_NODE) {
759 if (flags & MPOL_F_ADDR) {
760 err = lookup_node(mm, addr);
764 } else if (pol == current->mempolicy &&
765 pol->mode == MPOL_INTERLEAVE) {
766 *policy = current->il_next;
772 *policy = pol == &default_policy ? MPOL_DEFAULT :
775 * Internal mempolicy flags must be masked off before exposing
776 * the policy to userspace.
778 *policy |= (pol->flags & MPOL_MODE_FLAGS);
782 up_read(&current->mm->mmap_sem);
789 get_policy_nodemask(pol, nmask);
790 task_unlock(current);
796 up_read(&current->mm->mmap_sem);
800 #ifdef CONFIG_MIGRATION
804 static void migrate_page_add(struct page *page, struct list_head *pagelist,
808 * Avoid migrating a page that is shared with others.
810 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
811 if (!isolate_lru_page(page)) {
812 list_add_tail(&page->lru, pagelist);
813 inc_zone_page_state(page, NR_ISOLATED_ANON +
814 page_is_file_cache(page));
819 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
821 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
825 * Migrate pages from one node to a target node.
826 * Returns error or the number of pages not migrated.
828 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
836 node_set(source, nmask);
838 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
839 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
841 if (!list_empty(&pagelist))
842 err = migrate_pages(&pagelist, new_node_page, dest);
848 * Move pages between the two nodesets so as to preserve the physical
849 * layout as much as possible.
851 * Returns the number of pages that could not be moved.
853 int do_migrate_pages(struct mm_struct *mm,
854 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
860 err = migrate_prep();
864 down_read(&mm->mmap_sem);
866 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
871 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
872 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
873 * bit in 'tmp', and return that <source, dest> pair for migration.
874 * The pair of nodemasks 'to' and 'from' define the map.
876 * If no pair of bits is found that way, fallback to picking some
877 * pair of 'source' and 'dest' bits that are not the same. If the
878 * 'source' and 'dest' bits are the same, this represents a node
879 * that will be migrating to itself, so no pages need move.
881 * If no bits are left in 'tmp', or if all remaining bits left
882 * in 'tmp' correspond to the same bit in 'to', return false
883 * (nothing left to migrate).
885 * This lets us pick a pair of nodes to migrate between, such that
886 * if possible the dest node is not already occupied by some other
887 * source node, minimizing the risk of overloading the memory on a
888 * node that would happen if we migrated incoming memory to a node
889 * before migrating outgoing memory off that same node.
891 * A single scan of tmp is sufficient. As we go, we remember the
892 * most recent <s, d> pair that moved (s != d). If we find a pair
893 * that not only moved, but what's better, moved to an empty slot
894 * (d is not set in tmp), then we break out then, with that pair.
895 * Otherwise when we finish scanning tmp, we at least have the
896 * most recent <s, d> pair that moved. If we get all the way through
897 * the scan of tmp without finding any node that moved, much less
898 * moved to an empty node, then there is nothing left worth migrating.
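/*
 * Worked example (illustrative, not part of the original comment): with
 * from_nodes = {0,1} and to_nodes = {2,3}, the first pass over tmp = {0,1}
 * maps s=0 -> d=2; node 2 is not set in tmp, so the pair (0,2) is chosen
 * immediately, migrated, and node 0 is cleared from tmp. The next pass maps
 * s=1 -> d=3 and migrates the remainder, leaving tmp empty.
 */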
902 while (!nodes_empty(tmp)) {
907 for_each_node_mask(s, tmp) {
908 d = node_remap(s, *from_nodes, *to_nodes);
912 source = s; /* Node moved. Memorize */
915 /* dest not in remaining from nodes? */
916 if (!node_isset(dest, tmp))
922 node_clear(source, tmp);
923 err = migrate_to_node(mm, source, dest, flags);
930 up_read(&mm->mmap_sem);
938 * Allocate a new page for page migration based on vma policy.
939 * Start assuming that page is mapped by vma pointed to by @private.
940 * Search forward from there, if not. N.B., this assumes that the
941 * list of pages handed to migrate_pages()--which is how we get here--
942 * is in virtual address order.
944 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
946 struct vm_area_struct *vma = (struct vm_area_struct *)private;
947 unsigned long uninitialized_var(address);
950 address = page_address_in_vma(page, vma);
951 if (address != -EFAULT)
957 * if !vma, alloc_page_vma() will use task or system default policy
959 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
963 static void migrate_page_add(struct page *page, struct list_head *pagelist,
968 int do_migrate_pages(struct mm_struct *mm,
969 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
974 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
980 static long do_mbind(unsigned long start, unsigned long len,
981 unsigned short mode, unsigned short mode_flags,
982 nodemask_t *nmask, unsigned long flags)
984 struct vm_area_struct *vma;
985 struct mm_struct *mm = current->mm;
986 struct mempolicy *new;
991 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
992 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
994 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
997 if (start & ~PAGE_MASK)
1000 if (mode == MPOL_DEFAULT)
1001 flags &= ~MPOL_MF_STRICT;
1003 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1011 new = mpol_new(mode, mode_flags, nmask);
1013 return PTR_ERR(new);
1016 * If we are using the default policy then operation
1017 * on discontinuous address spaces is okay after all
1020 flags |= MPOL_MF_DISCONTIG_OK;
1022 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1023 start, start + len, mode, mode_flags,
1024 nmask ? nodes_addr(*nmask)[0] : -1);
1026 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1028 err = migrate_prep();
1033 NODEMASK_SCRATCH(scratch);
1035 down_write(&mm->mmap_sem);
1037 err = mpol_set_nodemask(new, nmask, scratch);
1038 task_unlock(current);
1040 up_write(&mm->mmap_sem);
1043 NODEMASK_SCRATCH_FREE(scratch);
1048 vma = check_range(mm, start, end, nmask,
1049 flags | MPOL_MF_INVERT, &pagelist);
1055 err = mbind_range(vma, start, end, new);
1057 if (!list_empty(&pagelist))
1058 nr_failed = migrate_pages(&pagelist, new_vma_page,
1059 (unsigned long)vma);
1061 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1064 putback_lru_pages(&pagelist);
1066 up_write(&mm->mmap_sem);
1073 * User space interface with variable sized bitmaps for nodelists.
1076 /* Copy a node mask from user space. */
1077 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1078 unsigned long maxnode)
1081 unsigned long nlongs;
1082 unsigned long endmask;
1085 nodes_clear(*nodes);
1086 if (maxnode == 0 || !nmask)
1088 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1091 nlongs = BITS_TO_LONGS(maxnode);
1092 if ((maxnode % BITS_PER_LONG) == 0)
1095 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1097 /* When the user specified more nodes than supported just check
1098 if the unsupported part is all zero. */
1099 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1100 if (nlongs > PAGE_SIZE/sizeof(long))
1102 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1104 if (get_user(t, nmask + k))
1106 if (k == nlongs - 1) {
1112 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1116 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1118 nodes_addr(*nodes)[nlongs-1] &= endmask;
1122 /* Copy a kernel node mask to user space */
1123 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1126 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1127 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1129 if (copy > nbytes) {
1130 if (copy > PAGE_SIZE)
1132 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1136 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1139 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1140 unsigned long, mode, unsigned long __user *, nmask,
1141 unsigned long, maxnode, unsigned, flags)
1145 unsigned short mode_flags;
1147 mode_flags = mode & MPOL_MODE_FLAGS;
1148 mode &= ~MPOL_MODE_FLAGS;
1149 if (mode >= MPOL_MAX)
1151 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1152 (mode_flags & MPOL_F_RELATIVE_NODES))
1154 err = get_nodes(&nodes, nmask, maxnode);
1157 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1160 /* Set the process memory policy */
1161 SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1162 unsigned long, maxnode)
1166 unsigned short flags;
1168 flags = mode & MPOL_MODE_FLAGS;
1169 mode &= ~MPOL_MODE_FLAGS;
1170 if ((unsigned int)mode >= MPOL_MAX)
1172 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1174 err = get_nodes(&nodes, nmask, maxnode);
1177 return do_set_mempolicy(mode, flags, &nodes);
1180 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1181 const unsigned long __user *, old_nodes,
1182 const unsigned long __user *, new_nodes)
1184 const struct cred *cred = current_cred(), *tcred;
1185 struct mm_struct *mm;
1186 struct task_struct *task;
1189 nodemask_t task_nodes;
1192 err = get_nodes(&old, old_nodes, maxnode);
1196 err = get_nodes(&new, new_nodes, maxnode);
1200 /* Find the mm_struct */
1201 read_lock(&tasklist_lock);
1202 task = pid ? find_task_by_vpid(pid) : current;
1204 read_unlock(&tasklist_lock);
1207 mm = get_task_mm(task);
1208 read_unlock(&tasklist_lock);
1214 * Check if this process has the right to modify the specified
1215 * process. The right exists if the process has administrative
1216 * capabilities, superuser privileges or the same
1217 * userid as the target process.
1220 tcred = __task_cred(task);
1221 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1222 cred->uid != tcred->suid && cred->uid != tcred->uid &&
1223 !capable(CAP_SYS_NICE)) {
1230 task_nodes = cpuset_mems_allowed(task);
1231 /* Is the user allowed to access the target nodes? */
1232 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
1237 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
1242 err = security_task_movememory(task);
1246 err = do_migrate_pages(mm, &old, &new,
1247 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1254 /* Retrieve NUMA policy */
1255 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1256 unsigned long __user *, nmask, unsigned long, maxnode,
1257 unsigned long, addr, unsigned long, flags)
1260 int uninitialized_var(pval);
1263 if (nmask != NULL && maxnode < MAX_NUMNODES)
1266 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1271 if (policy && put_user(pval, policy))
1275 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1280 #ifdef CONFIG_COMPAT
1282 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1283 compat_ulong_t __user *nmask,
1284 compat_ulong_t maxnode,
1285 compat_ulong_t addr, compat_ulong_t flags)
1288 unsigned long __user *nm = NULL;
1289 unsigned long nr_bits, alloc_size;
1290 DECLARE_BITMAP(bm, MAX_NUMNODES);
1292 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1293 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1296 nm = compat_alloc_user_space(alloc_size);
1298 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1300 if (!err && nmask) {
1301 err = copy_from_user(bm, nm, alloc_size);
1302 /* ensure entire bitmap is zeroed */
1303 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1304 err |= compat_put_bitmap(nmask, bm, nr_bits);
1310 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1311 compat_ulong_t maxnode)
1314 unsigned long __user *nm = NULL;
1315 unsigned long nr_bits, alloc_size;
1316 DECLARE_BITMAP(bm, MAX_NUMNODES);
1318 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1319 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1322 err = compat_get_bitmap(bm, nmask, nr_bits);
1323 nm = compat_alloc_user_space(alloc_size);
1324 err |= copy_to_user(nm, bm, alloc_size);
1330 return sys_set_mempolicy(mode, nm, nr_bits+1);
1333 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1334 compat_ulong_t mode, compat_ulong_t __user *nmask,
1335 compat_ulong_t maxnode, compat_ulong_t flags)
1338 unsigned long __user *nm = NULL;
1339 unsigned long nr_bits, alloc_size;
1342 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1343 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1346 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1347 nm = compat_alloc_user_space(alloc_size);
1348 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1354 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1360 * get_vma_policy(@task, @vma, @addr)
1361 * @task - task for fallback if vma policy == default
1362 * @vma - virtual memory area whose policy is sought
1363 * @addr - address in @vma for shared policy lookup
1365 * Returns effective policy for a VMA at specified address.
1366 * Falls back to @task or system default policy, as necessary.
1367 * Current or other task's task mempolicy and non-shared vma policies
1368 * are protected by the task's mmap_sem, which must be held for read by
1370 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1371 * count--added by the get_policy() vm_op, as appropriate--to protect against
1372 * freeing by another task. It is the caller's responsibility to free the
1373 * extra reference for shared policies.
1375 static struct mempolicy *get_vma_policy(struct task_struct *task,
1376 struct vm_area_struct *vma, unsigned long addr)
1378 struct mempolicy *pol = task->mempolicy;
1381 if (vma->vm_ops && vma->vm_ops->get_policy) {
1382 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1386 } else if (vma->vm_policy)
1387 pol = vma->vm_policy;
1390 pol = &default_policy;
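/*
 * Illustrative caller pattern (a sketch, not from this file): a caller that
 * may be handed a shared policy must drop the extra reference itself, e.g.
 * with mpol_cond_put() from <linux/mempolicy.h>:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... use pol to pick a zonelist/nodemask ...
 *	mpol_cond_put(pol);	// drops the ref only if MPOL_F_SHARED is set
 */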
1395 * Return a nodemask representing a mempolicy for filtering nodes for
1398 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1400 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1401 if (unlikely(policy->mode == MPOL_BIND) &&
1402 gfp_zone(gfp) >= policy_zone &&
1403 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1404 return &policy->v.nodes;
1409 /* Return the zonelist for the node indicated by a mempolicy, for the given gfp flags */
1410 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1412 int nd = numa_node_id();
1414 switch (policy->mode) {
1415 case MPOL_PREFERRED:
1416 if (!(policy->flags & MPOL_F_LOCAL))
1417 nd = policy->v.preferred_node;
1421 * Normally, MPOL_BIND allocations are node-local within the
1422 * allowed nodemask. However, if __GFP_THISNODE is set and the
1423 * current node is part of the mask, we use the zonelist for
1424 * the first node in the mask instead.
1426 if (unlikely(gfp & __GFP_THISNODE) &&
1427 unlikely(!node_isset(nd, policy->v.nodes)))
1428 nd = first_node(policy->v.nodes);
1430 case MPOL_INTERLEAVE: /* should not happen */
1435 return node_zonelist(nd, gfp);
1438 /* Do dynamic interleaving for a process */
1439 static unsigned interleave_nodes(struct mempolicy *policy)
1442 struct task_struct *me = current;
1445 next = next_node(nid, policy->v.nodes);
1446 if (next >= MAX_NUMNODES)
1447 next = first_node(policy->v.nodes);
1448 if (next < MAX_NUMNODES)
1454 * Depending on the memory policy provide a node from which to allocate the
1456 * @policy must be protected from freeing by the caller. If @policy is
1457 * the current task's mempolicy, this protection is implicit, as only the
1458 * task can change its policy. The system default policy requires no
1461 unsigned slab_node(struct mempolicy *policy)
1463 if (!policy || policy->flags & MPOL_F_LOCAL)
1464 return numa_node_id();
1466 switch (policy->mode) {
1467 case MPOL_PREFERRED:
1469 * handled MPOL_F_LOCAL above
1471 return policy->v.preferred_node;
1473 case MPOL_INTERLEAVE:
1474 return interleave_nodes(policy);
1478 * Follow bind policy behavior and start allocation at the
1481 struct zonelist *zonelist;
1483 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1484 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1485 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1496 /* Do static interleaving for a VMA with known offset. */
1497 static unsigned offset_il_node(struct mempolicy *pol,
1498 struct vm_area_struct *vma, unsigned long off)
1500 unsigned nnodes = nodes_weight(pol->v.nodes);
1506 return numa_node_id();
1507 target = (unsigned int)off % nnodes;
1510 nid = next_node(nid, pol->v.nodes);
1512 } while (c <= target);
1516 /* Determine a node number for interleave */
1517 static inline unsigned interleave_nid(struct mempolicy *pol,
1518 struct vm_area_struct *vma, unsigned long addr, int shift)
1524 * for small pages, there is no difference between
1525 * shift and PAGE_SHIFT, so the bit-shift is safe.
1526 * for huge pages, since vm_pgoff is in units of small
1527 * pages, we need to shift off the always 0 bits to get
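/*
 * Worked example (illustrative, assuming 4KB base pages): for a 2MB huge
 * page, shift is 21 and PAGE_SHIFT is 12, so off = vm_pgoff >> 9 plus the
 * huge-page index of addr within the VMA, (addr - vm_start) >> 21.
 */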
1530 BUG_ON(shift < PAGE_SHIFT);
1531 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1532 off += (addr - vma->vm_start) >> shift;
1533 return offset_il_node(pol, vma, off);
1535 return interleave_nodes(pol);
1538 #ifdef CONFIG_HUGETLBFS
1540 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1541 * @vma = virtual memory area whose policy is sought
1542 * @addr = address in @vma for shared policy lookup and interleave policy
1543 * @gfp_flags = for requested zone
1544 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1545 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1547 * Returns a zonelist suitable for a huge page allocation and a pointer
1548 * to the struct mempolicy for conditional unref after allocation.
1549 * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
1550 * @nodemask for filtering the zonelist.
1552 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1553 gfp_t gfp_flags, struct mempolicy **mpol,
1554 nodemask_t **nodemask)
1556 struct zonelist *zl;
1558 *mpol = get_vma_policy(current, vma, addr);
1559 *nodemask = NULL; /* assume !MPOL_BIND */
1561 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1562 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1563 huge_page_shift(hstate_vma(vma))), gfp_flags);
1565 zl = policy_zonelist(gfp_flags, *mpol);
1566 if ((*mpol)->mode == MPOL_BIND)
1567 *nodemask = &(*mpol)->v.nodes;
1573 /* Allocate a page in interleaved policy.
1574 Own path because it needs to do special accounting. */
1575 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1578 struct zonelist *zl;
1581 zl = node_zonelist(nid, gfp);
1582 page = __alloc_pages(gfp, order, zl);
1583 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1584 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1589 * alloc_page_vma - Allocate a page for a VMA.
1592 * %GFP_USER user allocation.
1593 * %GFP_KERNEL kernel allocations,
1594 * %GFP_HIGHMEM highmem/user allocations,
1595 * %GFP_FS allocation should not call back into a file system.
1596 * %GFP_ATOMIC don't sleep.
1598 * @vma: Pointer to VMA or NULL if not available.
1599 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1601 * This function allocates a page from the kernel page pool and applies
1602 * a NUMA policy associated with the VMA or the current process.
1603 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1604 * mm_struct of the VMA to prevent it from going away. Should be used for
1605 * all allocations for pages that will be mapped into
1606 * user space. Returns NULL when no page can be allocated.
1608 * Should be called with the mmap_sem of the vma held.
1611 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1613 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1614 struct zonelist *zl;
1616 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1619 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1621 return alloc_page_interleave(gfp, 0, nid);
1623 zl = policy_zonelist(gfp, pol);
1624 if (unlikely(mpol_needs_cond_ref(pol))) {
1626 * slow path: ref counted shared policy
1628 struct page *page = __alloc_pages_nodemask(gfp, 0,
1629 zl, policy_nodemask(gfp, pol));
1634 * fast path: default or task policy
1636 return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
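/*
 * Illustrative use (a sketch, not from this file): an anonymous fault path
 * would typically allocate with mmap_sem held for read, e.g.:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */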
1640 * alloc_pages_current - Allocate pages.
1643 * %GFP_USER user allocation,
1644 * %GFP_KERNEL kernel allocation,
1645 * %GFP_HIGHMEM highmem allocation,
1646 * %GFP_FS don't call back into a file system.
1647 * %GFP_ATOMIC don't sleep.
1648 * @order: Power of two of allocation size in pages. 0 is a single page.
1650 * Allocate a page from the kernel page pool. When not in
1651 * interrupt context, apply the current process' NUMA policy.
1652 * Returns NULL when no page can be allocated.
1654 * Don't call cpuset_update_task_memory_state() unless
1655 * 1) it's ok to take cpuset_sem (can WAIT), and
1656 * 2) allocating for current task (not interrupt).
1658 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1660 struct mempolicy *pol = current->mempolicy;
1662 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1663 pol = &default_policy;
1666 * No reference counting needed for current->mempolicy
1667 * nor system default_policy
1669 if (pol->mode == MPOL_INTERLEAVE)
1670 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1671 return __alloc_pages_nodemask(gfp, order,
1672 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1674 EXPORT_SYMBOL(alloc_pages_current);
1677 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1678 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1679 * with the mems_allowed returned by cpuset_mems_allowed(). This
1680 * keeps mempolicies cpuset relative after its cpuset moves. See
1681 * further kernel/cpuset.c update_nodemask().
1684 /* Slow path of a mempolicy duplicate */
1685 struct mempolicy *__mpol_dup(struct mempolicy *old)
1687 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1690 return ERR_PTR(-ENOMEM);
1691 if (current_cpuset_is_being_rebound()) {
1692 nodemask_t mems = cpuset_mems_allowed(current);
1693 mpol_rebind_policy(old, &mems);
1696 atomic_set(&new->refcnt, 1);
1701 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1702 * eliminate the MPOL_F_* flags that require conditional ref and
1703 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
1704 * after return. Use the returned value.
1706 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1707 * policy lookup, even if the policy needs/has extra ref on lookup.
1708 * shmem_readahead needs this.
1710 struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1711 struct mempolicy *frompol)
1713 if (!mpol_needs_cond_ref(frompol))
1717 tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
1718 __mpol_put(frompol);
1722 static int mpol_match_intent(const struct mempolicy *a,
1723 const struct mempolicy *b)
1725 if (a->flags != b->flags)
1727 if (!mpol_store_user_nodemask(a))
1729 return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1732 /* Slow path of a mempolicy comparison */
1733 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1737 if (a->mode != b->mode)
1739 if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
1744 case MPOL_INTERLEAVE:
1745 return nodes_equal(a->v.nodes, b->v.nodes);
1746 case MPOL_PREFERRED:
1747 return a->v.preferred_node == b->v.preferred_node &&
1748 a->flags == b->flags;
1756 * Shared memory backing store policy support.
1758 * Remember policies even when nobody has shared memory mapped.
1759 * The policies are kept in Red-Black tree linked from the inode.
1760 * They are protected by the sp->lock spinlock, which should be held
1761 * for any accesses to the tree.
1764 /* lookup first element intersecting start-end */
1765 /* Caller holds sp->lock */
1766 static struct sp_node *
1767 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1769 struct rb_node *n = sp->root.rb_node;
1772 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1774 if (start >= p->end)
1776 else if (end <= p->start)
1784 struct sp_node *w = NULL;
1785 struct rb_node *prev = rb_prev(n);
1788 w = rb_entry(prev, struct sp_node, nd);
1789 if (w->end <= start)
1793 return rb_entry(n, struct sp_node, nd);
1796 /* Insert a new shared policy into the list. */
1797 /* Caller holds sp->lock */
1798 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1800 struct rb_node **p = &sp->root.rb_node;
1801 struct rb_node *parent = NULL;
1806 nd = rb_entry(parent, struct sp_node, nd);
1807 if (new->start < nd->start)
1809 else if (new->end > nd->end)
1810 p = &(*p)->rb_right;
1814 rb_link_node(&new->nd, parent, p);
1815 rb_insert_color(&new->nd, &sp->root);
1816 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1817 new->policy ? new->policy->mode : 0);
1820 /* Find shared policy intersecting idx */
1822 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1824 struct mempolicy *pol = NULL;
1827 if (!sp->root.rb_node)
1829 spin_lock(&sp->lock);
1830 sn = sp_lookup(sp, idx, idx+1);
1832 mpol_get(sn->policy);
1835 spin_unlock(&sp->lock);
1839 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1841 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1842 rb_erase(&n->nd, &sp->root);
1843 mpol_put(n->policy);
1844 kmem_cache_free(sn_cache, n);
1847 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1848 struct mempolicy *pol)
1850 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1857 pol->flags |= MPOL_F_SHARED; /* for unref */
1862 /* Replace a policy range. */
1863 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1864 unsigned long end, struct sp_node *new)
1866 struct sp_node *n, *new2 = NULL;
1869 spin_lock(&sp->lock);
1870 n = sp_lookup(sp, start, end);
1871 /* Take care of old policies in the same range. */
1872 while (n && n->start < end) {
1873 struct rb_node *next = rb_next(&n->nd);
1874 if (n->start >= start) {
1880 /* Old policy spanning whole new range. */
1883 spin_unlock(&sp->lock);
1884 new2 = sp_alloc(end, n->end, n->policy);
1890 sp_insert(sp, new2);
1898 n = rb_entry(next, struct sp_node, nd);
1902 spin_unlock(&sp->lock);
1904 mpol_put(new2->policy);
1905 kmem_cache_free(sn_cache, new2);
1911 * mpol_shared_policy_init - initialize shared policy for inode
1912 * @sp: pointer to inode shared policy
1913 * @mpol: struct mempolicy to install
1915 * Install non-NULL @mpol in inode's shared policy rb-tree.
1916 * On entry, the current task has a reference on a non-NULL @mpol.
1917 * This must be released on exit.
1918 * This is called during get_inode() calls, so we can use GFP_KERNEL.
1920 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1924 sp->root = RB_ROOT; /* empty tree == default mempolicy */
1925 spin_lock_init(&sp->lock);
1928 struct vm_area_struct pvma;
1929 struct mempolicy *new;
1930 NODEMASK_SCRATCH(scratch);
1934 /* contextualize the tmpfs mount point mempolicy */
1935 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1937 mpol_put(mpol); /* drop our ref on sb mpol */
1938 NODEMASK_SCRATCH_FREE(scratch);
1939 return; /* no valid nodemask intersection */
1943 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
1944 task_unlock(current);
1945 mpol_put(mpol); /* drop our ref on sb mpol */
1947 NODEMASK_SCRATCH_FREE(scratch);
1952 /* Create pseudo-vma that contains just the policy */
1953 memset(&pvma, 0, sizeof(struct vm_area_struct));
1954 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
1955 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
1956 mpol_put(new); /* drop initial ref */
1957 NODEMASK_SCRATCH_FREE(scratch);
1961 int mpol_set_shared_policy(struct shared_policy *info,
1962 struct vm_area_struct *vma, struct mempolicy *npol)
1965 struct sp_node *new = NULL;
1966 unsigned long sz = vma_pages(vma);
1968 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1970 sz, npol ? npol->mode : -1,
1971 npol ? npol->flags : -1,
1972 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1975 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1979 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1981 kmem_cache_free(sn_cache, new);
1985 /* Free a backing policy store on inode delete. */
1986 void mpol_free_shared_policy(struct shared_policy *p)
1989 struct rb_node *next;
1991 if (!p->root.rb_node)
1993 spin_lock(&p->lock);
1994 next = rb_first(&p->root);
1996 n = rb_entry(next, struct sp_node, nd);
1997 next = rb_next(&n->nd);
1998 rb_erase(&n->nd, &p->root);
1999 mpol_put(n->policy);
2000 kmem_cache_free(sn_cache, n);
2002 spin_unlock(&p->lock);
2005 /* assumes fs == KERNEL_DS */
2006 void __init numa_policy_init(void)
2008 nodemask_t interleave_nodes;
2009 unsigned long largest = 0;
2010 int nid, prefer = 0;
2012 policy_cache = kmem_cache_create("numa_policy",
2013 sizeof(struct mempolicy),
2014 0, SLAB_PANIC, NULL);
2016 sn_cache = kmem_cache_create("shared_policy_node",
2017 sizeof(struct sp_node),
2018 0, SLAB_PANIC, NULL);
2021 * Set interleaving policy for system init. Interleaving is only
2022 * enabled across suitably sized nodes (default is >= 16MB); otherwise we
2023 * fall back to the largest node if they're all smaller.
2025 nodes_clear(interleave_nodes);
2026 for_each_node_state(nid, N_HIGH_MEMORY) {
2027 unsigned long total_pages = node_present_pages(nid);
2029 /* Preserve the largest node */
2030 if (largest < total_pages) {
2031 largest = total_pages;
2035 /* Interleave this node? */
2036 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2037 node_set(nid, interleave_nodes);
2040 /* All too small, use the largest */
2041 if (unlikely(nodes_empty(interleave_nodes)))
2042 node_set(prefer, interleave_nodes);
2044 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2045 printk("numa_policy_init: interleaving failed\n");
2048 /* Reset policy of current process to default */
2049 void numa_default_policy(void)
2051 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2055 * Parse and format mempolicy from/to strings
2059 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
2060 * Used only for mpol_parse_str() and mpol_to_str()
2062 #define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
2063 static const char * const policy_types[] =
2064 { "default", "prefer", "bind", "interleave", "local" };
2069 * mpol_parse_str - parse string to mempolicy
2070 * @str: string containing mempolicy to parse
2071 * @mpol: pointer to struct mempolicy pointer, returned on success.
2072 * @no_context: flag whether to "contextualize" the mempolicy
2075 * <mode>[=<flags>][:<nodelist>]
2077 * if @no_context is true, save the input nodemask in w.user_nodemask in
2078 * the returned mempolicy. This will be used to "clone" the mempolicy in
2079 * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
2080 * mount option. Note that if 'static' or 'relative' mode flags were
2081 * specified, the input nodemask will already have been saved. Saving
2082 * it again is redundant, but safe.
2084 * On success, returns 0, else 1
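/*
 * Illustrative inputs (not exhaustive): "interleave:0-3", "bind=static:1,3",
 * "prefer=relative:0", "local", "default". The same syntax is used for the
 * tmpfs mount option, e.g. mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt.
 */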
2086 int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2088 struct mempolicy *new = NULL;
2089 unsigned short uninitialized_var(mode);
2090 unsigned short uninitialized_var(mode_flags);
2092 char *nodelist = strchr(str, ':');
2093 char *flags = strchr(str, '=');
2098 /* NUL-terminate mode or flags string */
2100 if (nodelist_parse(nodelist, nodes))
2102 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2108 *flags++ = '\0'; /* terminate mode string */
2110 for (i = 0; i <= MPOL_LOCAL; i++) {
2111 if (!strcmp(str, policy_types[i])) {
2120 case MPOL_PREFERRED:
2122 * Insist on a nodelist of one node only
2125 char *rest = nodelist;
2126 while (isdigit(*rest))
2132 case MPOL_INTERLEAVE:
2134 * Default to online nodes with memory if no nodelist
2137 nodes = node_states[N_HIGH_MEMORY];
2142 * Don't allow a nodelist; mpol_new() checks flags
2146 mode = MPOL_PREFERRED;
2150 * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
2151 * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
2158 * Currently, we only support two mutually exclusive
2161 if (!strcmp(flags, "static"))
2162 mode_flags |= MPOL_F_STATIC_NODES;
2163 else if (!strcmp(flags, "relative"))
2164 mode_flags |= MPOL_F_RELATIVE_NODES;
2169 new = mpol_new(mode, mode_flags, &nodes);
2174 NODEMASK_SCRATCH(scratch);
2177 ret = mpol_set_nodemask(new, &nodes, scratch);
2178 task_unlock(current);
2181 NODEMASK_SCRATCH_FREE(scratch);
2185 } else if (no_context) {
2186 /* save for contextualization */
2187 new->w.user_nodemask = nodes;
2192 /* Restore string for error message */
2201 #endif /* CONFIG_TMPFS */
2204 * mpol_to_str - format a mempolicy structure for printing
2205 * @buffer: to contain formatted mempolicy string
2206 * @maxlen: length of @buffer
2207 * @pol: pointer to mempolicy to be formatted
2208 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
2210 * Convert a mempolicy into a string.
2211 * Returns the number of characters in buffer (if positive)
2212 * or an error (negative)
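/*
 * Illustrative outputs (following the format above): an interleave policy
 * over nodes 0-3 with MPOL_F_STATIC_NODES is formatted as
 * "interleave=static:0-3"; a local-allocation preferred policy becomes
 * "local"; the system default policy becomes "default".
 */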
2214 int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2219 unsigned short mode;
2220 unsigned short flags = pol ? pol->flags : 0;
2223 * Sanity check: room for longest mode, flag and some nodes
2225 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2227 if (!pol || pol == &default_policy)
2228 mode = MPOL_DEFAULT;
2237 case MPOL_PREFERRED:
2239 if (flags & MPOL_F_LOCAL)
2240 mode = MPOL_LOCAL; /* pseudo-policy */
2242 node_set(pol->v.preferred_node, nodes);
2247 case MPOL_INTERLEAVE:
2249 nodes = pol->w.user_nodemask;
2251 nodes = pol->v.nodes;
2258 l = strlen(policy_types[mode]);
2259 if (buffer + maxlen < p + l + 1)
2262 strcpy(p, policy_types[mode]);
2265 if (flags & MPOL_MODE_FLAGS) {
2266 if (buffer + maxlen < p + 2)
2271 * Currently, the only defined flags are mutually exclusive
2273 if (flags & MPOL_F_STATIC_NODES)
2274 p += snprintf(p, buffer + maxlen - p, "static");
2275 else if (flags & MPOL_F_RELATIVE_NODES)
2276 p += snprintf(p, buffer + maxlen - p, "relative");
2279 if (!nodes_empty(nodes)) {
2280 if (buffer + maxlen < p + 2)
2283 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2289 unsigned long pages;
2291 unsigned long active;
2292 unsigned long writeback;
2293 unsigned long mapcount_max;
2294 unsigned long dirty;
2295 unsigned long swapcache;
2296 unsigned long node[MAX_NUMNODES];
2299 static void gather_stats(struct page *page, void *private, int pte_dirty)
2301 struct numa_maps *md = private;
2302 int count = page_mapcount(page);
2305 if (pte_dirty || PageDirty(page))
2308 if (PageSwapCache(page))
2311 if (PageActive(page) || PageUnevictable(page))
2314 if (PageWriteback(page))
2320 if (count > md->mapcount_max)
2321 md->mapcount_max = count;
2323 md->node[page_to_nid(page)]++;
2326 #ifdef CONFIG_HUGETLB_PAGE
2327 static void check_huge_range(struct vm_area_struct *vma,
2328 unsigned long start, unsigned long end,
2329 struct numa_maps *md)
2333 struct hstate *h = hstate_vma(vma);
2334 unsigned long sz = huge_page_size(h);
2336 for (addr = start; addr < end; addr += sz) {
2337 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2338 addr & huge_page_mask(h));
2348 page = pte_page(pte);
2352 gather_stats(page, md, pte_dirty(*ptep));
2356 static inline void check_huge_range(struct vm_area_struct *vma,
2357 unsigned long start, unsigned long end,
2358 struct numa_maps *md)
2364 * Display pages allocated per node and memory policy via /proc.
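/*
 * Illustrative /proc/<pid>/numa_maps line produced below (field values are
 * made up): "00400000 default file=/bin/cat mapped=9 mapmax=2 N0=9".
 */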
2366 int show_numa_map(struct seq_file *m, void *v)
2368 struct proc_maps_private *priv = m->private;
2369 struct vm_area_struct *vma = v;
2370 struct numa_maps *md;
2371 struct file *file = vma->vm_file;
2372 struct mm_struct *mm = vma->vm_mm;
2373 struct mempolicy *pol;
2380 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2384 pol = get_vma_policy(priv->task, vma, vma->vm_start);
2385 mpol_to_str(buffer, sizeof(buffer), pol, 0);
2388 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2391 seq_printf(m, " file=");
2392 seq_path(m, &file->f_path, "\n\t= ");
2393 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2394 seq_printf(m, " heap");
2395 } else if (vma->vm_start <= mm->start_stack &&
2396 vma->vm_end >= mm->start_stack) {
2397 seq_printf(m, " stack");
2400 if (is_vm_hugetlb_page(vma)) {
2401 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2402 seq_printf(m, " huge");
2404 check_pgd_range(vma, vma->vm_start, vma->vm_end,
2405 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2412 seq_printf(m, " anon=%lu", md->anon);
2415 seq_printf(m, " dirty=%lu", md->dirty);
2417 if (md->pages != md->anon && md->pages != md->dirty)
2418 seq_printf(m, " mapped=%lu", md->pages);
2420 if (md->mapcount_max > 1)
2421 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2424 seq_printf(m, " swapcache=%lu", md->swapcache);
2426 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2427 seq_printf(m, " active=%lu", md->active);
2430 seq_printf(m, " writeback=%lu", md->writeback);
2432 for_each_node_state(n, N_HIGH_MEMORY)
2434 seq_printf(m, " N%d=%lu", n, md->node[n]);
2439 if (m->count < m->size)
2440 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;