/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];

static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];

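/*
 * Return the cpumask of the topology group that @cpu belongs to, or a
 * mask containing only @cpu itself if topology is disabled or @cpu is
 * not found in any group.
 */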
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

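/*
 * Walk a CPU type topology list entry (TLE) and update the book and
 * core masks as well as the per-cpu id arrays for every CPU it covers.
 * Bits in the TLE mask are numbered from the most significant bit, so
 * the physical cpu address is (TOPOLOGY_CPU_BITS - 1 - bit) + origin.
 */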
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int one_core_per_cpu)
{
	unsigned int cpu;

	for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
		unsigned int rcpu;
		int lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu < 0)
			continue;
		cpumask_set_cpu(lcpu, &book->mask);
		cpu_book_id[lcpu] = book->id;
		cpumask_set_cpu(lcpu, &core->mask);
		cpu_core_id[lcpu] = rcpu;
		if (one_core_per_cpu) {
			cpu_socket_id[lcpu] = rcpu;
			core = core->next;
		} else {
			cpu_socket_id[lcpu] = core->id;
		}
		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
	}
	return core;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

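/* Topology list entries are variable sized; step over one entry. */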
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

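/*
 * Generic layout: nesting level 2 entries describe books, level 1
 * entries describe cores (sockets) and level 0 entries describe CPUs.
 */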
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

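/*
 * z10 layout: only two nesting levels are reported and each CPU sits
 * in a core of its own, so a fresh core mask is consumed per CPU.
 */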
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

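/* Select the parser matching the machine type reported by the CPU id. */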
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	spin_lock_irq(&topology_lock);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_cores_z10(info);
		break;
	default:
		__tl_to_cores_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

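/*
 * Issue the perform topology function (PTF) instruction with function
 * code @fc and return its condition code.
 */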
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

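/* Fetch SYSIB 15.1.x; use nesting level 3 if the machine supports it. */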
void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}

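/*
 * Re-read the topology information and rebuild the core/book maps.
 * Returns 1 if the scheduler should rebuild its domains.
 */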
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

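/*
 * Timer callback: ptf(PTF_CHECK) returns a nonzero condition code if a
 * topology change report is pending, in which case schedule an update.
 */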
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

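/*
 * Poll every 100ms while a topology change is expected (topology_poll
 * counts down to zero), otherwise fall back to a 60 second interval.
 */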
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

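/* "topology=off" on the kernel command line disables topology support. */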
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

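/*
 * Allocate one mask_info per possible container at the given nesting
 * level; the count is the product of the relevant magnitude fields.
 */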
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

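/* Called early at boot to read the initial topology and set up the masks. */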
void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
	alloc_masks(info, &book_info, 2);
}

static int cpu_management;

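/*
 * The "dispatching" sysfs attribute reports and sets the cpu management
 * mode: 0 selects horizontal, 1 selects vertical polarization.
 */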
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

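/* Per-cpu sysfs attribute showing the current polarization of a CPU. */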
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

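/* Late init: start the polling timer and create the sysfs interface. */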
static int __init topology_init(void)
{
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	set_topology_timer();
out:
	update_cpu_core_map();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);