/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost RAM (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
        int i;
        int res = -1;
        unsigned long addr, end;

        memset(memnodemap, 0xff, memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != 0xff)
                                return -1;
                        memnodemap[addr >> shift] = i;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}
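
/*
 * Illustrative sketch (not part of the original file): how the table
 * built above is consumed.  With memnode_shift = 24 (16MB granules),
 * a physical-address-to-node lookup is a single indexed load, which
 * is all that phys_to_nid() does:
 *
 *      nid = memnodemap[addr >> memnode_shift];
 *
 * e.g. addr = 0x48000000 indexes slot 0x48000000 >> 24 = 72, and the
 * slot holds the index of whichever node covers that 16MB granule.
 */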

static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long pad, pad_addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= 48)
                return 0;

        pad = L1_CACHE_BYTES - 1;
        pad_addr = 0x8000;
        nodemap_size = pad + memnodemapsize;
        nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
                                      nodemap_size);
        if (nodemap_addr == -1UL) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate memory-to-node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        pad_addr = (nodemap_addr + pad) & ~pad;
        memnodemap = phys_to_virt(pad_addr);

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i) + 1;
        return i;
}
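
/*
 * Worked example (illustrative, not from the original file): two nodes
 * starting at 0x0 and 0x40000000 (1GB), with memtop = 0x80000000 (2GB).
 * OR-ing the start addresses gives bitfield = 0x40000000, whose lowest
 * set bit is 30, so the shift is 30 and
 * memnodemapsize = (0x80000000 >> 30) + 1 = 3.
 */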

int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
               shift);

        if (populate_memnodemap(nodes, numnodes, shift) != 1) {
                printk(KERN_INFO
                       "Your memory is not aligned; you need to rebuild your "
                       "kernel with a bigger NODEMAPSIZE, shift=%d\n",
                       shift);
                return -1;
        }
        return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif

static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
               unsigned long size)
{
        unsigned long mem = find_e820_area(start, end, size);
        void *ptr;

        if (mem != -1L)
                return __va(mem);
        ptr = __alloc_bootmem_nopanic(size,
                                      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
        if (ptr == NULL) {
                printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);
                return NULL;
        }
        return ptr;
}

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
        unsigned long nodedata_phys;
        void *bootmap;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

        start = round_up(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

        /* Find a place for the bootmem map */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT);
        if (bootmap == NULL) {
                if (nodedata_phys < start || nodedata_phys >= end)
                        free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);
        Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, end_pfn);

        free_bootmem_with_active_regions(nodeid, end);

        reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
        reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
#endif
        node_set_online(nodeid);
}
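
/*
 * Illustrative sketch (not part of the original file) of the metadata
 * setup_node_bootmem() carves out of the node's own memory:
 *
 *      node start (rounded up to ZONE_ALIGN)
 *        pg_data_t       pgdat_size bytes, page aligned
 *        bootmem bitmap  bootmap_pages pages, one bit per page frame
 *        ...the rest of the node, handed to the bootmem allocator
 *
 * Both areas are reserved again via reserve_bootmem_node() so the
 * allocator never hands them out.
 */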

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
        unsigned long start_pfn, end_pfn, memmapsize, limit;

        start_pfn = node_start_pfn(nodeid);
        end_pfn = node_end_pfn(nodeid);

        Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
                nodeid, start_pfn, end_pfn);

        /* Try to allocate mem_map at the end to not fill up precious <4GB
           memory. */
        memmapsize = sizeof(struct page) * (end_pfn - start_pfn);
        limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        NODE_DATA(nodeid)->node_mem_map =
                __alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
                                     memmapsize, SMP_CACHE_BYTES,
                                     round_down(limit - memmapsize, PAGE_SIZE),
                                     limit);
#endif
}

void __init numa_init_array(void)
{
        int rr, i;

        /* There are unfortunately some poorly designed mainboards around
           that only connect memory to a single CPU. This breaks the 1:1
           cpu->node mapping. To avoid this, fill in the mapping for all
           possible CPUs, as the number of CPUs is not known yet.
           We round robin the existing nodes. */
        rr = first_node(node_online_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node[i] != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}
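
/*
 * Worked example (illustrative): with nodes 0 and 1 online and no
 * firmware-provided mapping, the loop above assigns cpu0->node0,
 * cpu1->node1, cpu2->node0, cpu3->node1, ... wrapping around
 * node_online_map until every cpu_to_node[] entry is filled.
 */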

#ifdef CONFIG_NUMA_EMU
/* NUMA emulation */
int numa_fake __initdata = 0;

/*
 * This function is used to find out if the start and end correspond to
 * different zones.
 */
int zone_cross_over(unsigned long start, unsigned long end)
{
        if ((start < (MAX_DMA32_PFN << PAGE_SHIFT)) &&
            (end >= (MAX_DMA32_PFN << PAGE_SHIFT)))
                return 1;
        return 0;
}

static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
        int i, big;
        struct bootnode nodes[MAX_NUMNODES];
        unsigned long sz, old_sz;
        unsigned long hole_size;
        unsigned long start, end;
        unsigned long max_addr = (end_pfn << PAGE_SHIFT);

        start = (start_pfn << PAGE_SHIFT);
        hole_size = e820_hole_size(start, max_addr);
        sz = (max_addr - start - hole_size) / numa_fake;

        /* Kludge needed for the hash function */
        old_sz = sz;
        /*
         * Round down to the nearest FAKE_NODE_MIN_SIZE.
         */
        sz &= FAKE_NODE_MIN_HASH_MASK;

        /*
         * We ensure that each node is at least 64MB big.  Smaller than this
         * size can cause VM hiccups.
         */
        if (sz == 0) {
                printk(KERN_INFO "Not enough memory for %d nodes.  Reducing "
                       "the number of nodes\n", numa_fake);
                numa_fake = (max_addr - start - hole_size) / FAKE_NODE_MIN_SIZE;
                printk(KERN_INFO "Number of fake nodes will be %d\n",
                       numa_fake);
                sz = FAKE_NODE_MIN_SIZE;
        }

        /*
         * Find out how many nodes can get an extra FAKE_NODE_MIN_SIZE granule.
         * This logic ensures the extra memory gets distributed among as many
         * nodes as possible (as compared to one single node getting all that
         * extra memory).
         */
        big = ((old_sz - sz) * numa_fake) / FAKE_NODE_MIN_SIZE;
        printk(KERN_INFO "Fake node size: %luMB hole_size: %luMB big nodes: "
               "%d\n",
               (sz >> 20), (hole_size >> 20), big);
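
        /*
         * Worked example (illustrative, not from the original file):
         * 2400MB of usable RAM and numa_fake = 4 give sz = 600MB,
         * rounded down to 576MB (the nearest 64MB multiple), so
         * old_sz - sz = 24MB per node and big = (24MB * 4) / 64MB = 1:
         * the first node grows by one extra 64MB granule.
         */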
        memset(&nodes, 0, sizeof(nodes));
        end = start;
        for (i = 0; i < numa_fake; i++) {
                /*
                 * In case we are not able to allocate enough memory for all
                 * the nodes, we reduce the number of fake nodes.
                 */
                if (end >= max_addr) {
                        numa_fake = i - 1;
                        break;
                }
                start = nodes[i].start = end;
                /*
                 * Final node can have all the remaining memory.
                 */
                if (i == numa_fake-1)
                        sz = max_addr - start;
                end = nodes[i].start + sz;
                /*
                 * The first "big" nodes get an extra granule each.
                 */
                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
                 * Iterate over the range to ensure that this node gets at
                 * least sz amount of RAM (excluding holes).
                 */
                while ((end - start - e820_hole_size(start, end)) < sz) {
                        end += FAKE_NODE_MIN_SIZE;
                        if (end >= max_addr)
                                break;
                }
                /*
                 * Look at the next node to make sure there is some real
                 * memory to map.  Bad things happen when the only memory
                 * present in a zone on a fake node is an IO hole.
                 */
                while (e820_hole_size(end, end + FAKE_NODE_MIN_SIZE) > 0) {
                        if (zone_cross_over(start, end + sz)) {
                                end = (MAX_DMA32_PFN << PAGE_SHIFT);
                                break;
                        }
                        if (end >= max_addr)
                                break;
                        end += FAKE_NODE_MIN_SIZE;
                }
                if (end > max_addr)
                        end = max_addr;
                nodes[i].end = end;
                printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
                       i,
                       nodes[i].start, nodes[i].end,
                       (nodes[i].end - nodes[i].start) >> 20);
                node_set_online(i);
        }
        memnode_shift = compute_hash_shift(nodes, numa_fake);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
                return -1;
        }
        for_each_online_node(i) {
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                             nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        numa_init_array();
        return 0;
}
#endif

void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;

#ifdef CONFIG_NUMA_EMU
        if (numa_fake && !numa_emulation(start_pfn, end_pfn))
                return;
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          end_pfn << PAGE_SHIFT))
                return;
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
                return;
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               end_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        nodes_clear(node_online_map);
        node_set_online(0);
        for (i = 0; i < NR_CPUS; i++)
                numa_set_node(i, 0);
        node_to_cpumask[0] = cpumask_of_cpu(0);
        e820_register_active_regions(0, start_pfn, end_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
        set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
        cpu_pda(cpu)->nodenumber = node;
        cpu_to_node[cpu] = node;
}

unsigned long __init numa_free_all_bootmem(void)
{
        int i;
        unsigned long pages = 0;

        for_each_online_node(i) {
                pages += free_all_bootmem_node(NODE_DATA(i));
        }
        return pages;
}

void __init paging_init(void)
{
        int i;
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        for_each_online_node(i) {
                setup_node_zones(i);
        }

        free_area_init_nodes(max_zone_pfns);
}
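
/*
 * Note (illustrative, not from the original file): on x86-64 these
 * boundaries put pages below 16MB in ZONE_DMA (legacy ISA devices),
 * pages below 4GB in ZONE_DMA32 (devices with 32-bit DMA masks), and
 * everything else in ZONE_NORMAL.
 */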

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5)) {
                numa_fake = simple_strtoul(opt + 5, NULL, 0);
                if (numa_fake >= MAX_NUMNODES)
                        numa_fake = MAX_NUMNODES;
        }
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
        if (!strncmp(opt, "hotadd=", 7))
                hotadd_percent = simple_strtoul(opt + 7, NULL, 10);
#endif
        return 0;
}

early_param("numa", numa_setup);
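
/*
 * Example usage (illustrative): matching the parser above, boot with
 * "numa=off" to disable NUMA discovery, "numa=fake=4" to split memory
 * into four emulated nodes, "numa=noacpi" to skip the ACPI SRAT scan,
 * or "numa=hotadd=20" to set hotadd_percent to 20.
 */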

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[] and
 * apicid_to_node[] have valid entries for a CPU.  This means we skip
 * cpu_to_node[] initialisation for NUMA emulation and the fake-node
 * case (when running a kernel compiled for NUMA on a non-NUMA box),
 * which is OK because cpu_to_node[] is already initialized in a round
 * robin manner at numa_init_array(), prior to this call, and that
 * initialization is good enough for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++) {
                u8 apicid = x86_cpu_to_apicid[i];
                if (apicid == BAD_APICID)
                        continue;
                if (apicid_to_node[apicid] == NUMA_NO_NODE)
                        continue;
                numa_set_node(i, apicid_to_node[apicid]);
        }
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per node page addresses.
 * These are out of line because they are quite big.
 * They could all be tuned by pre-caching more state.
 * Should do that.
 */

int pfn_valid(unsigned long pfn)
{
        unsigned nid;

        if (pfn >= num_physpages)
                return 0;
        nid = pfn_to_nid(pfn);
        if (nid == 0xff)
                return 0;
        return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif