/*
 *  acpi_numa.c - ACPI NUMA support
 *
 *  Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/numa.h>

#define PREFIX "ACPI: "

#define ACPI_NUMA	0x80000000
#define _COMPONENT	ACPI_NUMA
ACPI_MODULE_NAME("numa");

static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

unsigned char acpi_srat_revision __initdata;
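
/*
 * Translate between a firmware proximity domain (PXM) and the kernel's
 * logical node ID.  A negative argument yields NUMA_NO_NODE / PXM_INVAL
 * instead of an out-of-bounds array access.
 */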
int pxm_to_node(int pxm)
{
	if (pxm < 0)
		return NUMA_NO_NODE;
	return pxm_to_node_map[pxm];
}

int node_to_pxm(int node)
{
	if (node < 0)
		return PXM_INVAL;
	return node_to_pxm_map[node];
}
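
/*
 * Record a pxm <-> node association.  If several proximity domains end up
 * on the same node (or vice versa), the smallest value is kept.
 */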
static void __acpi_map_pxm_to_node(int pxm, int node)
{
	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
		pxm_to_node_map[pxm] = node;
	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
		node_to_pxm_map[node] = pxm;
}
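
/*
 * Return the logical node ID for @pxm, allocating the next unused node
 * from nodes_found_map the first time a proximity domain is seen.
 * Returns NUMA_NO_NODE once all MAX_NUMNODES nodes have been used.
 */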
int acpi_map_pxm_to_node(int pxm)
{
	int node = pxm_to_node_map[pxm];

	if (node == NUMA_NO_NODE) {
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		node = first_unset_node(nodes_found_map);
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
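
/*
 * Debug helper: pretty-print one SRAT subtable entry.  The output is only
 * produced when ACPI_DEBUG_OUTPUT is enabled; unknown entry types are
 * reported with a warning.
 */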
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");

	if (!header)
		return;

	switch (header->type) {

	case ACPI_SRAT_TYPE_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
					  p->apic_id, p->local_sapic_eid,
					  p->proximity_domain_lo,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
					  (unsigned long)p->base_address,
					  (unsigned long)p->length,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_MEM_ENABLED) ?
					  "enabled" : "disabled",
					  (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
					  " hot-pluggable" : "",
					  (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
					  " non-volatile" : ""));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (x2apicid[0x%08x]) in"
					  " proximity domain %d %s\n",
					  p->apic_id,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	default:
		printk(KERN_WARNING PREFIX
		       "Found unsupported SRAT entry (type = 0x%x)\n",
		       header->type);
		break;
	}
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere.  This messes up
 * the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick sanity checks here and only use the SLIT if it passes.
 */
static int __init slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;

	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];

			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}

static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit = (struct acpi_table_slit *)table;

	if (!slit_valid(slit)) {
		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
		return -EINVAL;
	}
	acpi_numa_slit_init(slit);

	return 0;
}
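
/*
 * Default stub; an architecture that handles x2APIC affinity entries is
 * expected to override this (the definition below is weak).
 */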
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	printk(KERN_WARNING PREFIX
	       "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}

static int __init
acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
	if (!processor_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent code handle it */
	acpi_numa_x2apic_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_processor_affinity(struct acpi_subtable_header *header,
			      const unsigned long end)
{
	struct acpi_srat_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_cpu_affinity *)header;
	if (!processor_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent code handle it */
	acpi_numa_processor_affinity_init(processor_affinity);

	return 0;
}

static int __initdata parsed_numa_memblks;

static int __init
acpi_parse_memory_affinity(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_srat_mem_affinity *memory_affinity;

	memory_affinity = (struct acpi_srat_mem_affinity *)header;
	if (!memory_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent code handle it */
	if (!acpi_numa_memory_affinity_init(memory_affinity))
		parsed_numa_memblks++;

	return 0;
}

static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat() below. */

	return 0;
}

static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat), id,
					handler, max_entries);
}
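
/*
 * Parse the SRAT (x2APIC CPU, CPU, then memory affinity entries) and the
 * SLIT, then let the architecture apply its fixups.  Returns a negative
 * errno when no usable NUMA memory information was found.
 */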
int __init acpi_numa_init(void)
{
	int cnt = 0;

	/*
	 * The SRAT CPU entries may be ordered differently from those in the
	 * MADT, and their number should not be capped by NR_CPUS or nr_cpus=.
	 * So walk every CPU entry in the SRAT to get the full apicid-to-node
	 * mapping.
	 */

	/* SRAT: Static Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
				      acpi_parse_x2apic_affinity, 0);
		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
				      acpi_parse_processor_affinity, 0);
		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity,
					    NR_NODE_MEMBLKS);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	acpi_numa_arch_fixup();

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}
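
/*
 * Find the proximity domain of a device by evaluating _PXM, walking up
 * the ACPI namespace until an ancestor provides one.  Returns -1 if no
 * _PXM method is found.
 */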
static int acpi_get_pxm(acpi_handle h)
{
	unsigned long long pxm;
	acpi_status status;
	acpi_handle handle;
	acpi_handle phandle = h;

	do {
		handle = phandle;
		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;
		status = acpi_get_parent(handle, &phandle);
	} while (ACPI_SUCCESS(status));
	return -1;
}
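
/*
 * Map an ACPI device handle to its NUMA node.  Returns NUMA_NO_NODE when
 * neither the device nor any ancestor reports a valid proximity domain.
 */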
int acpi_get_node(acpi_handle handle)
{
	int pxm;

	pxm = acpi_get_pxm(handle);
	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
		return NUMA_NO_NODE;

	return acpi_map_pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);