 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Supports four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process
 *                counter is used.
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly
 *                restrict the allocation to memory nodes instead.
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for
 * memory allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
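/*
 * Illustrative userspace sketch (not part of this file): the policies
 * described above are driven from userspace through the set_mempolicy(2)
 * and mbind(2) system calls, roughly like this ('addr' and 'length' are
 * assumed to describe an existing mapping; the maxnode value is only
 * meant to cover the bits used here):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	// interleave this task's future allocations over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	// bind an existing mapping to the nodes in 'mask' and migrate
 *	// its currently misplaced pages there
 *	mbind(addr, length, MPOL_BIND, &mask, 8 * sizeof(mask),
 *	      MPOL_MF_MOVE);
 */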
   fix mmap readahead to honour policy and enable policy for any page cache
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   handle mremap for shared memory (currently ignored for the policy)
   make bind policy root only? It can trigger oom much faster and the
   kernel does not always handle that gracefully.
#include <linux/mempolicy.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

 * run-time system-wide default policy => local allocation
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the new nodes, and the second step
	 * clears all the disallowed nodes. This way a reader never finds
	 * itself with no node to allocate from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the new nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
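/*
 * Illustrative example (not from this file) of why the rebind above is
 * split into two steps: rebinding an MPOL_INTERLEAVE policy from nodes
 * {0,1} to nodes {2,3} by clearing and then setting bits could expose a
 * lockless reader to an empty intermediate nodemask.  With the two-step
 * scheme the mask goes {0,1} -> STEP1 -> {0,1,2,3} -> STEP2 -> {2,3},
 * so a reader always observes at least one allowed node.
 */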
/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
	for_each_node_mask(nd, *nodemask) {
		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
	return pol->flags & MPOL_MODE_FLAGS;

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
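/*
 * Worked example (illustrative): with *orig = {0,2} and *rel = {4,5,6,7},
 * nodes_fold() leaves {0,2} untouched (both bits lie below
 * nodes_weight(*rel) == 4) and nodes_onto() maps bit 0 to the first node
 * of *rel and bit 2 to the third, so *ret = {4,6}.
 */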
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
	if (nodes_empty(*nodes))
	pol->v.nodes = *nodes;

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	pol->v.preferred_node = first_node(*nodes);

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
	if (!is_valid_nodemask(nodes))
	pol->v.nodes = *nodes;

 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */

	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
			nodes_and(nsc->mask2, *nodes, nsc->mask1);
		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
		ret = mpol_ops[pol->mode].create(pol, NULL);
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */

	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->flags = flags;

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
	if (!atomic_dec_and_test(&p->refcnt))
	kmem_cache_free(policy_cache, p);

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
 * MPOL_REBIND_ONCE  - do the rebind work at once
 * MPOL_REBIND_STEP1 - set all the new nodes
 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);

		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
				    pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;

	if (nodes_empty(tmp))

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
		pol->w.cpuset_mems_allowed = *nodes;
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the new nodes, and the second step clears all the
 * disallowed nodes. This way a reader never finds itself with no node
 * to allocate from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
			       enum mpol_rebind_step step)
	if (!mpol_store_user_nodemask(pol) && step == 0 &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)

	mpol_ops[pol->mode].rebind(pol, newmask, step);

 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
	mpol_rebind_policy(tsk->mempolicy, new, step);

 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
		.rebind = mpol_rebind_default,
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
		if (!pte_present(*pte))
		page = vm_normal_page(vma, addr, *pte);
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 * And we cannot move PageKsm pages sensibly or safely yet.
		if (PageReserved(page) || PageKsm(page))
		nid = page_to_nid(page);
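		/*
		 * Illustrative note on the test below: it XORs the node
		 * check with MPOL_MF_INVERT.  Without the flag, pages whose
		 * node is outside *nodes are skipped and pages on the listed
		 * nodes are acted upon (as in migrate_to_node()); with the
		 * flag set (as do_mbind() passes it), pages already on an
		 * allowed node are skipped and the misplaced ones are
		 * collected for migration.
		 */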
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
	pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(vma->vm_mm, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		if (check_pte_range(vma, pmd, addr, next, nodes,
	} while (pmd++, addr = next, addr != end);

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
	pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
		if (check_pmd_range(vma, pud, addr, next, nodes,
	} while (pud++, addr = next, addr != end);

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
	pgd = pgd_offset(vma->vm_mm, addr);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
		if (check_pud_range(vma, pgd, addr, next, nodes,
	} while (pgd++, addr = next, addr != end);

 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
		return ERR_PTR(-EFAULT);
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
				first = ERR_PTR(err);

 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
	old = vma->vm_policy;
	vma->vm_policy = new;	/* protected by mmap_sem */

/* Step 2: apply policy to a range and do splits. */
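/*
 * Example (illustrative): mbind() over [0x3000, 0x5000) of a vma spanning
 * [0x1000, 0x9000) splits off [0x1000, 0x3000) and [0x5000, 0x9000) and
 * applies the new policy only to the middle vma; the vma_merge() call
 * below may instead absorb the range into a neighbour that already
 * carries an equal policy.
 */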
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	unsigned long vmstart;

	vma = find_vma_prev(mm, start, &prev);
	if (!vma || vma->vm_start > start)

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff, new_pol);
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
		err = vma_replace_policy(vma, new_pol);
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
void mpol_fix_fork_child_flag(struct task_struct *p)
		p->flags |= PF_MEMPOLICY;
		p->flags &= ~PF_MEMPOLICY;

static void mpol_set_task_struct_flag(void)
	mpol_fix_fork_child_flag(current);
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);

	new = mpol_new(mode, flags, nodes);

	 * prevent changing our mempolicy while show_numa_maps()
	 * Note: do_set_mempolicy() can be called at init time
		down_write(&mm->mmap_sem);
	ret = mpol_set_nodemask(new, nodes, scratch);
		task_unlock(current);
			up_write(&mm->mmap_sem);

	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
		up_write(&mm->mmap_sem);
	NODEMASK_SCRATCH_FREE(scratch);
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
	if (p == &default_policy)

	case MPOL_INTERLEAVE:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */

static int lookup_node(struct mm_struct *mm, unsigned long addr)
	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
		err = page_to_nid(p);

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	    ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
		*policy = 0;	/* just so it's initialized */
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);

	if (flags & MPOL_F_ADDR) {
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
			up_read(&mm->mmap_sem);
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
			pol = vma->vm_policy;

		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;

		*policy = pol == &default_policy ? MPOL_DEFAULT :
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		*policy |= (pol->flags & MPOL_MODE_FLAGS);

	up_read(&current->mm->mmap_sem);

	if (mpol_store_user_nodemask(pol)) {
		*nmask = pol->w.user_nodemask;
		get_policy_nodemask(pol, nmask);
	task_unlock(current);

	up_read(&current->mm->mmap_sem);
#ifdef CONFIG_MIGRATION

static void migrate_page_add(struct page *page, struct list_head *pagelist,
	 * Avoid migrating a page that is shared with others.
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);

 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
	struct vm_area_struct *vma;

	node_set(source, nmask);
	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			  flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
				    false, MIGRATE_SYNC);
			putback_lru_pages(&pagelist);
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
	err = migrate_prep();

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
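	/*
	 * Worked example (illustrative): from = {0,1,2}, to = {1,2,3},
	 * so node_remap() maps 0->1, 1->2, 2->3.  Scanning tmp = {0,1,2}:
	 * <0,1> and <1,2> move to nodes that are still sources, but <2,3>
	 * moves to a node outside tmp, so 2->3 migrates first.  Subsequent
	 * passes pick 1->2 and finally 0->1, draining each destination
	 * before refilling it.
	 */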
	while (!nodes_empty(tmp)) {
		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			source = s;	/* Node moved. Memorize */
			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
	up_read(&mm->mmap_sem);

 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)

	 * if !vma, alloc_page_vma() will use task or system default policy
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))

	if (start & ~PAGE_MASK)

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;

	new = mpol_new(mode, mode_flags, nmask);
		return PTR_ERR(new);

	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		NODEMASK_SCRATCH(scratch);
			down_write(&mm->mmap_sem);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
				up_write(&mm->mmap_sem);
		NODEMASK_SCRATCH_FREE(scratch);

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = mbind_range(mm, start, end, new);

	if (!list_empty(&pagelist)) {
		nr_failed = migrate_pages(&pagelist, new_vma_page,
		putback_lru_pages(&pagelist);

	if (!err && nr_failed && (flags & MPOL_MF_STRICT))
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 * User space interface with variable sized bitmaps for nodelists.

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
	unsigned long nlongs;
	unsigned long endmask;

	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
			if (k == nlongs - 1) {
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
	nodes_addr(*nodes)[nlongs-1] &= endmask;

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
	err = get_nodes(&nodes, nmask, maxnode);
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
	err = get_nodes(&nodes, nmask, maxnode);
	return do_set_mempolicy(mode, flags, &nodes);

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	NODEMASK_SCRATCH(scratch);

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	err = get_nodes(new, new_nodes, maxnode);

	/* Find the mm_struct */
	task = pid ? find_task_by_vpid(pid) : current;
	mm = get_task_mm(task);

	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {

	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {

	err = security_task_movememory(task);

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	NODEMASK_SCRATCH_FREE(scratch);

/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
	int uninitialized_var(pval);

	if (nmask != NULL && maxnode < MAX_NUMNODES)

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (policy && put_user(pval, policy))

		err = copy_nodes_to_user(nmask, maxnode, &nodes);
#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);

	return sys_set_mempolicy(mode, nm, nr_bits+1);

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
				 compat_ulong_t mode, compat_ulong_t __user *nmask,
				 compat_ulong_t maxnode, compat_ulong_t flags)
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read
 * by the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
	struct mempolicy *pol = task->mempolicy;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
	} else if (vma->vm_policy) {
		pol = vma->vm_policy;

		 * shmem_alloc_page() passes MPOL_F_SHARED policy with
		 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
		 * count on these policies which will be dropped by
		 * mpol_cond_put() later
		if (mpol_needs_cond_ref(pol))
	pol = &default_policy;
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
	    gfp_zone(gfp) >= policy_zone &&
	    cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;

		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		if (unlikely(gfp & __GFP_THISNODE) &&
		    unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
	return node_zonelist(nd, gfp);

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
	struct task_struct *me = current;

	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
unsigned slab_node(struct mempolicy *policy)
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		 * handled MPOL_F_LOCAL above
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

		 * Follow bind policy behavior and start allocation at the
		 * first node.
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
		return zone ? zone->node : numa_node_id();
/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
	unsigned nnodes = nodes_weight(pol->v.nodes);

		return numa_node_id();
	target = (unsigned int)off % nnodes;
		nid = next_node(nid, pol->v.nodes);
	} while (c <= target);
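/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5} and off = 7,
 * nnodes = 3 and target = 7 % 3 = 1; the loop above steps twice
 * (nid = 0, then nid = 2) and returns node 2, the interleave set's
 * second node for the second slot.
 */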
/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	return interleave_nodes(pol);
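/*
 * Illustrative note: for 2MB huge pages (shift == 21, PAGE_SHIFT == 12)
 * the computation above drops the nine always-zero low bits of vm_pgoff
 * (which counts 4KB pages) and advances one interleave slot per huge
 * page within the vma, so consecutive huge pages land on consecutive
 * nodes of the interleave set.
 */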
#ifdef CONFIG_HUGETLBFS
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'bind', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by get_mems_allowed()
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
bool init_nodemask_of_mempolicy(nodemask_t *mask)
	struct mempolicy *mempolicy;

	if (!(mask && current->mempolicy))

	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);

	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;

	task_unlock(current);
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.  Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
	struct mempolicy *mempolicy;

	mempolicy = tsk->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from; they may fall back to other nodes when oom.
		 * Thus, it's possible for tsk to have allocated memory from
		 * other nodes.
	case MPOL_INTERLEAVE:
		ret = nodes_intersects(mempolicy->v.nodes, *mask);
/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
	struct zonelist *zl;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
 * alloc_pages_vma	- Allocate a page for a VMA.
 *
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When the VMA is not NULL the caller must hold a read lock on the
 * mmap_sem of the VMA's mm_struct to prevent it from going away.
 * Should be used for all allocations for pages that will be mapped into
 * user space. Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, int node)
	struct mempolicy *pol;
	struct zonelist *zl;
	unsigned int cpuset_mems_cookie;

	pol = get_vma_policy(current, vma, addr);
	cpuset_mems_cookie = get_mems_allowed();

	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		page = alloc_page_interleave(gfp, order, nid);
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))

	zl = policy_zonelist(gfp, pol, node);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		 * slow path: ref counted shared policy
		struct page *page = __alloc_pages_nodemask(gfp, order,
						zl, policy_nodemask(gfp, pol));
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))

	 * fast path: default or task policy
	page = __alloc_pages_nodemask(gfp, order, zl,
				      policy_nodemask(gfp, pol));
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
 * alloc_pages_current - Allocate pages.
 *
 *	%GFP_USER   user allocation,
 *	%GFP_KERNEL kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS     don't call back into a file system.
 *	%GFP_ATOMIC don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool. When not in
 * interrupt context, apply the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
	struct mempolicy *pol = current->mempolicy;
	unsigned int cpuset_mems_cookie;

	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

	cpuset_mems_cookie = get_mems_allowed();

	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
		page = __alloc_pages_nodemask(gfp, order,
				policy_zonelist(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))

EXPORT_SYMBOL(alloc_pages_current);
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by the other task (the task that
 * changes the cpuset's mems), so we needn't do rebind work for the
 * current task.
/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_unlock(current);

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
	atomic_set(&new->refcnt, 1);
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminate the MPOL_F_* flags that require conditional ref and
 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
 * after return.  Use the returned value.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has extra ref on lookup.
 * shmem_readahead needs this.
struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
	if (!mpol_needs_cond_ref(frompol))

	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
	__mpol_put(frompol);

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
	if (a->mode != b->mode)
	if (a->flags != b->flags)
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))

	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by sp->mutex, which should be held
 * for any accesses to the tree.
/* lookup first element intersecting start-end */
/* Caller holds sp->mutex */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
	struct rb_node *n = sp->root.rb_node;

		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
		else if (end <= p->start)

		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
			w = rb_entry(prev, struct sp_node, nd);
			if (w->end <= start)
	return rb_entry(n, struct sp_node, nd);

/* Insert a new shared policy into the list. */
/* Caller holds sp->mutex */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;

		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);

/* Find shared policy intersecting idx */
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
	struct mempolicy *pol = NULL;

	if (!sp->root.rb_node)
	mutex_lock(&sp->mutex);
	sn = sp_lookup(sp, idx, idx+1);
		mpol_get(sn->policy);
	mutex_unlock(&sp->mutex);

static void sp_free(struct sp_node *n)
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
	newpol->flags |= MPOL_F_SHARED;

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
	mutex_lock(&sp->mutex);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			/* Old policy spanning whole new range. */
			struct sp_node *new2;
			new2 = sp_alloc(end, n->end, n->policy);
			sp_insert(sp, new2);
		n = rb_entry(next, struct sp_node, nd);
	mutex_unlock(&sp->mutex);

 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called at get_inode() calls and we can use GFP_KERNEL.
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	mutex_init(&sp->mutex);

		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
			goto free_scratch; /* no valid nodemask intersection */

		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
		mpol_put(new);			/* drop initial ref */
		NODEMASK_SCRATCH_FREE(scratch);
	mpol_put(mpol);	/* drop our incoming ref on sb mpol */

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
	struct rb_node *next;

	if (!p->root.rb_node)
	mutex_lock(&p->mutex);
	next = rb_first(&p->root);
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
	mutex_unlock(&p->mutex);

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");

/* Reset policy of current process to default */
void numa_default_policy(void)
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);

 * Parse and format mempolicy from/to strings

 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
 * Used only for mpol_parse_str() and mpol_to_str()
#define MPOL_LOCAL MPOL_MAX
static const char * const policy_modes[] =
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local"
 * mpol_parse_str - parse string to mempolicy
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 * @no_context: flag whether to "contextualize" the mempolicy
 *
 *	<mode>[=<flags>][:<nodelist>]
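 * e.g. (illustrative) "interleave:0-3", "prefer=static:2", "bind:1,3"
 * or plain "local".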
 * if @no_context is true, save the input nodemask in w.user_nodemask in
 * the returned mempolicy.  This will be used to "clone" the mempolicy in
 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
 * mount option.  Note that if 'static' or 'relative' mode flags were
 * specified, the input nodemask will already have been saved.  Saving
 * it again is redundant, but safe.
 *
 * On success, returns 0, else 1
int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short uninitialized_var(mode_flags);
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');

		/* NUL-terminate mode or flags string */
		if (nodelist_parse(nodelist, nodes))
		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
		if (!strcmp(str, policy_modes[mode])) {
	if (mode > MPOL_LOCAL)
	case MPOL_PREFERRED:
		 * Insist on a nodelist of one node only
			char *rest = nodelist;
			while (isdigit(*rest))
	case MPOL_INTERLEAVE:
		 * Default to online nodes with memory if no nodelist
			nodes = node_states[N_HIGH_MEMORY];
		 * Don't allow a nodelist; mpol_new() checks flags
		mode = MPOL_PREFERRED;
		 * Insist on an empty nodelist
		 * Insist on a nodelist
		 * Currently, we only support two mutually exclusive
		 * flags.
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
	new = mpol_new(mode, mode_flags, &nodes);

		/* save for contextualization */
		new->w.user_nodemask = nodes;

		NODEMASK_SCRATCH(scratch);
			ret = mpol_set_nodemask(new, &nodes, scratch);
			task_unlock(current);
		NODEMASK_SCRATCH_FREE(scratch);

	/* Restore string for error message */

#endif /* CONFIG_TMPFS */

 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
 *
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
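 * e.g. (illustrative) an interleave policy with static nodes 0-3 formats
 * as "interleave=static:0-3".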
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
	unsigned short mode;
	unsigned short flags = pol ? pol->flags : 0;

	 * Sanity check: room for longest mode, flag and some nodes
	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;

	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;	/* pseudo-policy */
			node_set(pol->v.preferred_node, nodes);

	case MPOL_INTERLEAVE:
			nodes = pol->w.user_nodemask;
			nodes = pol->v.nodes;

	l = strlen(policy_modes[mode]);
	if (buffer + maxlen < p + l + 1)
	strcpy(p, policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)

		 * Currently, the only defined flags are mutually exclusive
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);