/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
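
/*
 * Note: numa_debug (and hence dbg()) is switched on by passing
 * "numa=debug" on the kernel command line; see early_numa() below.
 */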

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
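
/*
 * distance_lookup_table caches, for each node, the associativity domain
 * IDs found at each of the reference points above; __node_distance()
 * compares two rows of this table to derive a NUMA distance.
 */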

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify node id, iff we started creating NUMA nodes
         * We want to continue from where we left off the last time
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;
        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /* Skip commas and spaces */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}

/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
                                          struct node_active_region *node_ar)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                if (pfn >= start_pfn && pfn < end_pfn) {
                        node_ar->nid = nid;
                        node_ar->start_pfn = start_pfn;
                        node_ar->end_pfn = end_pfn;
                        break;
                }
        }
}

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);
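
/*
 * Example: with form 1 affinity and a reference-point depth of 4, two
 * nodes whose lookup-table rows first agree at index 2 take distance
 * LOCAL_DISTANCE * 2 * 2 = 40; rows that never agree yield
 * LOCAL_DISTANCE << 4 = 160.
 */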

static void initialize_distance_lookup_table(int nid,
                const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i])];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
            of_read_number(associativity, 1) >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);

out:
        return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
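
/*
 * Example: with n == 2 (the usual n_mem_addr_cells on 64-bit), the two
 * big-endian cells <0x00000001 0x20000000> combine to 0x120000000.
 */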

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
        const __be32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = of_read_number(cp, 1);
        drmem->reserved = of_read_number(&cp[1], 1);
        drmem->aa_index = of_read_number(&cp[2], 1);
        drmem->flags = of_read_number(&cp[3], 1);

        *cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
        const __be32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = of_read_number(prop++, 1);

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}
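
/*
 * On the wire the property therefore looks like
 *   <N  entry0 entry1 ... entryN-1>
 * where each entry occupies n_mem_addr_cells + 4 cells (base address,
 * drc_index, reserved, aa_index, flags), which is exactly what the
 * length check above verifies.
 */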

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
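
/*
 * Example layout with N = 2 arrays of M = 4 entries each:
 *   <2 4  a0 a1 a2 a3  b0 b1 b2 b3>
 * aa->arrays then points at a0, and array i starts at cell i * M.
 */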

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = of_read_number(&aa->arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
        }

        return nid;
}
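
/*
 * The index arithmetic above selects, within the lookup array chosen by
 * drmem->aa_index, the domain ID at associativity level min_common_depth
 * (levels are 1-based in the property, hence the "- 1").
 */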

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = -1;
        struct device_node *cpu;

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         */
        if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);

out_present:
        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        map_cpu_to_node(lcpu, nid);
        of_node_put(cpu);
out:
        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE, nid;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                nid = numa_setup_cpu(lcpu);
                verify_cpu_node_mapping((int)lcpu, nid);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */
        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
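
/*
 * Example: with memblock_end_of_DRAM() at 5GB, a region starting at 4GB
 * with size 2GB is trimmed to 1GB, while one starting at 6GB is dropped
 * (returns 0).
 */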

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) tuples.
         * read the counter from linux,drconf-usable-memory
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                /* Get the node id */
                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) tuples */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                                ((base + size) >> PAGE_SHIFT),
                                                           &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz,
                                                  &memblock.memory, nid);
                } while (--ranges);
        }
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                parse_drconf_memory(memory);
                of_node_put(memory);
        }

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        void *ret;
        int new_nid;
        unsigned long ret_paddr;

        ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret_paddr)
                ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);

        ret = __va(ret_paddr);

        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
         * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator.  If this function is called for
         * node 5, then we know that all nodes <5 are using the
         * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
         * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                dbg("alloc_bootmem %p %lx\n", ret, size);
        }

        memset(ret, 0, size);
        return ret;
}

static struct notifier_block ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
        struct pglist_data *node = NODE_DATA(nid);
        struct memblock_region *reg;

        for_each_memblock(reserved, reg) {
                unsigned long physbase = reg->base;
                unsigned long size = reg->size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
                unsigned long node_end_pfn = pgdat_end_pfn(node);

                /*
                 * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
                 * entire node.
                 */
                if (end_pfn <= node->node_start_pfn ||
                    start_pfn >= node_end_pfn)
                        continue;

                get_node_active_region(start_pfn, &node_ar);
                while (start_pfn < end_pfn &&
                        node_ar.start_pfn < node_ar.end_pfn) {
                        unsigned long reserve_size = size;
                        /*
                         * if reserved region extends past active region
                         * then trim size to active region
                         */
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - physbase;
                        /*
                         * Only worry about *this* node, others may not
                         * yet have valid NODE_DATA().
                         */
                        if (node_ar.nid == nid) {
                                dbg("reserve_bootmem %lx %lx nid=%d\n",
                                        physbase, reserve_size, node_ar.nid);
                                reserve_bootmem_node(NODE_DATA(node_ar.nid),
                                                physbase, reserve_size,
                                                BOOTMEM_DEFAULT);
                        }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
                         */
                        if (end_pfn <= node_ar.end_pfn)
                                break;

                        /*
                         * reserved region extends past the active region
                         *   get next active region that contains this
                         *   reserved region
                         */
                        start_pfn = node_ar.end_pfn;
                        physbase = start_pfn << PAGE_SHIFT;
                        size = size - reserve_size;
                        get_node_active_region(start_pfn, &node_ar);
                }
        }
}

void __init do_init_bootmem(void)
{
        int nid, cpu;

        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /*
                 * Allocate the node structure node local if possible
                 *
                 * Be careful moving this around, as it relies on all
                 * previous nodes' bootmem to be initialized and have
                 * all reserved areas marked.
                 */
                NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);

                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

                init_bootmem_node(NODE_DATA(nid),
                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);
                /*
                 * Be very careful about moving this around.  Future
                 * calls to careful_zallocation() depend on this getting
                 * done correctly.
                 */
                mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }

        init_bootmem_done = 1;

        /*
         * Now bootmem is initialised we can create the node to cpumask
         * lookup tables and setup the cpu callback to populate them.
         */
        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();
        register_cpu_notifier(&ppc64_numa_nb);
        /*
         * We need the numa_cpu_lookup_table to be accurate for all CPUs,
         * even before we online them, so that we can use cpu_to_{node,mem}
         * early in boot, cf. smp_prepare_cpus().
         */
        for_each_present_cpu(cpu) {
                numa_setup_cpu((unsigned long)cpu);
        }
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
        if (!p)
                return 0;

        if (!strcmp(p, "off")) {
                pr_info("Disabling topology updates\n");
                topology_updates_enabled = false;
        }

        return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const __be32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
{
        int i, nr_assoc_doms = 0;
        const __be16 *field = (const __be16 *) packed;

#define VPHN_FIELD_UNUSED       (0xffff)
#define VPHN_FIELD_MSB          (0x8000)
#define VPHN_FIELD_MASK         (~VPHN_FIELD_MSB)

        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
                if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
                        unpacked[i] = *((__be32 *)field);
                        field += 2;
                } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
                        unpacked[i] = cpu_to_be32(
                                be16_to_cpup(field) & VPHN_FIELD_MASK);
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
                        unpacked[i] = *((__be32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }

        /* The first cell contains the length of the property */
        unpacked[0] = cpu_to_be32(nr_assoc_doms);

        return nr_assoc_doms;
}
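
/*
 * Packed-format example: a 16-bit field of 0x8002 (MSB set) unpacks to the
 * single domain number 2, while a field with the MSB clear combines with
 * the following 16-bit field to form one 32-bit domain number; 0xffff
 * marks unused trailing fields.
 */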

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);
        int i;

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        for (i = 0; i < 6; i++)
                retbuf[i] = cpu_to_be64(retbuf[i]);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                        __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
        }

        return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(update->cpu);
                map_cpu_to_node(update->cpu, update->new_nid);
                vdso_getcpu_init();
        }

        return 0;
}

static int update_lookup_table(void *data)
{
        struct topology_update_data *update;

        if (!data)
                return -EINVAL;

        /*
         * Upon topology update, the numa-cpu lookup table needs to be updated
         * for all threads in the core, including offline CPUs, to ensure that
         * future hotplug operations respect the cpu-to-node associativity
         * properly.
         */
        for (update = data; update; update = update->next) {
                int nid, base, j;

                nid = update->new_nid;
                base = cpu_first_thread_sibling(update->cpu);

                for (j = 0; j < threads_per_core; j++) {
                        update_numa_cpu_lookup_table(base + j, nid);
                }
        }

        return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        if (!prrn_enabled && !vphn_enabled)
                return 0;

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If siblings aren't flagged for changes, updates list
                 * will be too short. Skip on this update and set for next
                 * update.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        pr_debug("Topology update for the following CPUs:\n");
        if (cpumask_weight(&updated_cpus)) {
                for (ud = &updates[0]; ud; ud = ud->next) {
                        pr_debug("cpu %d moving from node %d "
                                          "to %d\n", ud->cpu,
                                          ud->old_nid, ud->new_nid);
                }
        }

        /*
         * In cases where we have nothing to update (because the updates list
         * is too short or because the new topology is same as the old one),
         * skip invoking update_cpu_topology() via stop-machine(). This is
         * necessary (and not just a fast-path optimization) since stop-machine
         * can end up electing a random CPU to run update_cpu_topology(), and
         * thus trick us into setting up incorrect cpu-node mappings (since
         * 'updates' is kzalloc()'ed).
         *
         * And for the similar reason, we will skip all the following updating.
         */
        if (!cpumask_weight(&updated_cpus))
                goto out;

        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        /*
         * Update the numa-cpu lookup table with the new mappings, even for
         * offline CPUs. It is best to perform this update from the stop-
         * machine context.
         */
        stop_machine(update_lookup_table, &updates[0],
                                        cpumask_of(raw_smp_processor_id()));

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

out:
        kfree(updates);
        return changed;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_prop_reconfig *update;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                update = (struct of_prop_reconfig *)data;
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EINVAL;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};

static int topology_update_init(void)
{
        /* Do not poll for changes if disabled at boot */
        if (topology_updates_enabled)
                start_topology_update();

        if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
                return -ENOMEM;

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */