/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about the node(s) from which
 * memory should be allocated.
 *
 * Supports four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a per-process
 *		counter is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   make bind policy root only? It can trigger the OOM killer much faster and
   the kernel does not always handle that gracefully.
*/
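
/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * described above are typically requested through the libnuma syscall
 * wrappers from <numaif.h>.  The node numbers and mapping length here are
 * assumptions for the example.
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// process policy: interleave new allocations over nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// VMA policy: bind one anonymous mapping to node 0 only
 *	unsigned long node0 = 1UL << 0;
 *	size_t length = 1 << 20;
 *	void *p = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */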
#include <linux/mempolicy.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous VMAs */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
#define MPOL_MF_STATS        (MPOL_MF_INTERNAL << 2)	/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;
	struct zone *z;

	/* Check that there is something useful in this mask */
	for_each_node_mask(nd, *nodemask) {
		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}
	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;

	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
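
/*
 * Worked example (not from the original source): with a user-supplied
 * relative mask orig = {0,3} and a cpuset mask rel = {4,6} (weight 2),
 * nodes_fold() maps each bit b of orig to b % 2, giving tmp = {0,1};
 * nodes_onto() then maps bit n of tmp to the nth set bit of rel, so the
 * result is {4,6}.  Relative masks thus follow the cpuset wherever it moves.
 */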
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->v.preferred_node = -1;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;
	nodemask_t cpuset_context_nmask;
	int ret;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
			nodes = NULL;	/* flag local alloc */
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->policy = mode;
	policy->flags = flags;

	if (nodes) {
		/*
		 * cpuset related setup doesn't apply to local allocation
		 */
		cpuset_update_task_memory_state();
		if (flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
					       &cpuset_current_mems_allowed);
		else
			nodes_and(cpuset_context_nmask, *nodes,
				  cpuset_current_mems_allowed);
		if (mpol_store_user_nodemask(policy))
			policy->w.user_nodemask = *nodes;
		else
			policy->w.cpuset_mems_allowed =
						cpuset_mems_allowed(current);
	}

	ret = mpol_ops[mode].create(policy,
				    nodes ? &cpuset_context_nmask : NULL);
	if (ret < 0) {
		kmem_cache_free(policy_cache, policy);
		return ERR_PTR(ret);
	}
	return policy;
}
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes))
			pol->v.preferred_node = node;
		else
			pol->v.preferred_node = -1;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (pol->v.preferred_node != -1) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->policy].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};
static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages, checking whether they satisfy the required conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}
/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new;
	struct mm_struct *mm = current->mm;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	mpol_put(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	if (mm)
		up_write(&mm->mmap_sem);

	return 0;
}
/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy | pol->flags;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
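
/*
 * Worked example (not from the original source): from_nodes = {0,1} and
 * to_nodes = {1,2}, so node_remap() maps 0 -> 1 and 1 -> 2.  On the first
 * scan, <0,1> moves but its dest (1) is still set in tmp, so it is only
 * remembered; <1,2> moves to an empty slot (2 is not in tmp), so we break
 * and migrate 1 -> 2 first.  After clearing 1 from tmp, the next pass
 * migrates 0 -> 1, which has drained by then.  Migrating in the other
 * order would briefly pile both nodes' pages onto node 1.
 */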
	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						  (unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_put(new);
	return err;
}
/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   that the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			copy = PAGE_SIZE;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			  unsigned long mode,
			  unsigned long __user *nmask, unsigned long maxnode,
			  unsigned flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
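
/*
 * Illustrative (not from the original source): userspace passes the optional
 * mode flags OR'ed into the mode argument, e.g. to keep an interleave mask
 * fixed across cpuset rebinds (addr and length are assumed to exist):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 2);	// assumed node numbers
 *	mbind(addr, length, MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *	      &mask, sizeof(mask) * 8, 0);
 *
 * sys_mbind() above splits this back into mode and mode_flags.
 */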
/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
				  unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process.  The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}
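
/*
 * Illustrative (not from the original source): moving another task's pages
 * from node 0 to node 1 via the libnuma wrapper for this syscall, assuming
 * the caller satisfies the permission checks above (target_pid is assumed):
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	long left = migrate_pages(target_pid, sizeof(from) * 8, &from, &to);
 *	// left > 0: that many pages could not be moved; negative on error
 */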
/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				  unsigned long __user *nmask,
				  unsigned long maxnode,
				  unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);
	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif
/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * The returned policy has an extra reference if it is shared, a vma
 * policy, or some other task's policy [show_numa_maps() can pass
 * @task != current].  It is the caller's responsibility to
 * free the reference in these cases.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;
	int shared_pol = 0;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
			shared_pol = 1;	/* if pol non-NULL, add ref below */
		} else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	else if (!shared_pol && pol != current->mempolicy)
		mpol_get(pol);	/* vma or other task's policy */
	return pol;
}

/* Return a nodemask representing a mempolicy */
static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->policy == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and
		 * the current node is not part of the mask, we use the
		 * zonelist for the first node in the mask instead.
		 */
		nd = numa_node_id();
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return node_zonelist(nd, gfp);
}
/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone->node;
	}

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}
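
/*
 * Worked example (not from the original source): with pol->v.nodes = {1,3,5}
 * (nnodes = 3) and a page at offset off = 7 into the mapping, target =
 * 7 % 3 = 1, so offset_il_node() walks to the second set node and returns 3.
 * The same offset always maps to the same node, which is what makes VMA
 * interleave placement stable across repeated faults.
 */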
#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation.
 * If the effective policy is 'BIND, returns pointer to local node's zonelist,
 * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
 * If it is also a policy for which get_vma_policy() returns an extra
 * reference, we must hold that reference until after the allocation.
 * In that case, return policy via @mpol so hugetlb allocation can drop
 * the reference.  For non-'BIND referenced policies, we can/do drop the
 * reference here, so the caller doesn't need to know about the special case
 * for default and current task policy.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	*mpol = NULL;		/* probably no unref needed */
	*nodemask = NULL;	/* assume !MPOL_BIND */
	if (pol->policy == MPOL_BIND) {
		*nodemask = &pol->v.nodes;
	} else if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		if (unlikely(pol != &default_policy &&
				pol != current->mempolicy))
			__mpol_put(pol);	/* finished with pol */
		return node_zonelist(nid, gfp_flags);
	}

	zl = zonelist_policy(GFP_HIGHUSER, pol);
	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
		if (pol->policy != MPOL_BIND)
			__mpol_put(pol);	/* finished with pol */
		else
			*mpol = pol;	/* unref needed after allocation */
	}
	return zl;
}
#endif
/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * 	alloc_page_vma	- Allocate a page for a VMA.
 *
 * 	@gfp:
 * 	%GFP_USER    user allocation.
 * 	%GFP_KERNEL  kernel allocations,
 * 	%GFP_HIGHMEM highmem/user allocations,
 * 	%GFP_FS      allocation should not call back into a file system.
 * 	%GFP_ATOMIC  don't sleep.
 *
 * 	@vma:  Pointer to VMA or NULL if not available.
 * 	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * 	This function allocates a page from the kernel page pool and applies
 * 	a NUMA policy associated with the VMA or the current process.
 * 	When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * 	mm_struct of the VMA to prevent it from going away. Should be used for
 * 	all allocations for pages that will be mapped into
 * 	user space. Returns NULL when no page can be allocated.
 *
 * 	Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	cpuset_update_task_memory_state();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		if (unlikely(pol != &default_policy &&
				pol != current->mempolicy))
			__mpol_put(pol);	/* finished with pol */
		return alloc_page_interleave(gfp, 0, nid);
	}
	zl = zonelist_policy(gfp, pol);
	if (pol != &default_policy && pol != current->mempolicy) {
		/*
		 * slow path: ref counted policy -- shared or vma
		 */
		struct page *page = __alloc_pages_nodemask(gfp, 0,
						zl, nodemask_policy(gfp, pol));
		__mpol_put(pol);
		return page;
	}
	/*
	 * fast path: default or task policy
	 */
	return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
}
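
/*
 * Illustrative (not from the original source): the typical fault-path caller
 * pattern for alloc_page_vma(), sketched with assumed variable names:
 *
 *	// inside a fault handler, with mm->mmap_sem held for read
 *	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *
 * Because the VMA and address are passed in, interleave placement is stable
 * for a given page of the mapping rather than following the faulting CPU.
 */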
/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER   user allocation,
 *		%GFP_KERNEL kernel allocation,
 *		%GFP_HIGHMEM highmem allocation,
 *		%GFP_FS     don't call back into a file system.
 *		%GFP_ATOMIC don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in
 *	interrupt context, apply the current process' NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages_nodemask(gfp, order,
			zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	return new;
}

static int mpol_match_intent(const struct mempolicy *a,
			     const struct mempolicy *b)
{
	if (a->flags != b->flags)
		return 0;
	if (!mpol_store_user_nodemask(a))
		return 1;
	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	if (a->policy != MPOL_DEFAULT && !mpol_match_intent(a, b))
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a red-black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */
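
/*
 * Illustrative (not from the original source): a filesystem backing shared
 * memory wires this up through its vm_operations, roughly:
 *
 *	static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 *						unsigned long addr)
 *	{
 *		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 *		unsigned long idx = ((addr - vma->vm_start) >> PAGE_SHIFT)
 *					+ vma->vm_pgoff;
 *
 *		return mpol_shared_policy_lookup(&SOME_FS_INFO(inode)->policy,
 *						 idx);
 *	}
 *
 * SOME_FS_INFO() stands in for the filesystem's inode-info accessor (e.g.
 * SHMEM_I() in mm/shmem.c); the names here are assumptions for the sketch.
 */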
/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_put(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}
void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
			unsigned short flags, nodemask_t *policy_nodes)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);

	if (policy != MPOL_DEFAULT) {
		struct mempolicy *newpol;

		/* Falls back to MPOL_DEFAULT on any error */
		newpol = mpol_new(policy, flags, policy_nodes);
		if (!IS_ERR(newpol)) {
			/* Create pseudo-vma that contains just the policy */
			struct vm_area_struct pvma;

			memset(&pvma, 0, sizeof(struct vm_area_struct));
			/* Policy covers entire file */
			pvma.vm_end = TASK_SIZE;
			mpol_set_shared_policy(info, &pvma, newpol);
			mpol_put(newpol);
		}
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_put(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}
/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init.  Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB);
	 * otherwise fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
	unsigned short flags = pol ? pol->flags : 0;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (flags) {
		int need_bar = 0;

		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		if (flags & MPOL_F_STATIC_NODES)
			p += sprintf(p, "%sstatic", need_bar++ ? "|" : "");
		if (flags & MPOL_F_RELATIVE_NODES)
			p += sprintf(p, "%srelative", need_bar++ ? "|" : "");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
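
/*
 * Illustrative (not from the original source): example strings produced by
 * mpol_to_str() on an assumed 4-node machine, following the format above:
 *
 *	"default"		MPOL_DEFAULT
 *	"prefer=2"		MPOL_PREFERRED, node 2
 *	"interleave=0-3"	MPOL_INTERLEAVE over all nodes
 *	"bind=static=1,3"	MPOL_BIND with MPOL_F_STATIC_NODES
 */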
struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif

int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	/*
	 * unref shared or other task's mempolicy
	 */
	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_printf(m, "\n");
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}