/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);

/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];

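/*
 * Return the mask of all CPUs that share the mask_info entry (socket or
 * book) containing @cpu; if topology is disabled or not available, the
 * mask contains only @cpu itself.
 */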
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	return mask;
}

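/*
 * Add all CPUs described by one topology CPU entry to the given book and
 * socket masks. The CPU bit mask is numbered starting from the most
 * significant bit, so rcpu = TOPOLOGY_CPU_BITS - 1 - bit + origin converts
 * a bit position back to the machine-wide core number.
 */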
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *socket,
					  int one_socket_per_cpu)
{
	unsigned int cpu;

	for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
		unsigned int rcpu;
		int lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu < 0)
			continue;
		cpumask_set_cpu(lcpu, &book->mask);
		cpu_topology[lcpu].book_id = book->id;
		cpumask_set_cpu(lcpu, &socket->mask);
		cpu_topology[lcpu].core_id = rcpu;
		if (one_socket_per_cpu) {
			cpu_topology[lcpu].socket_id = rcpu;
			socket = socket->next;
		} else {
			cpu_topology[lcpu].socket_id = socket->id;
		}
		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
	}
	return socket;
}

static void clear_masks(void)
{
	struct mask_info *info;

	for (info = &socket_info; info; info = info->next)
		cpumask_clear(&info->mask);
	for (info = &book_info; info; info = info->next)
		cpumask_clear(&info->mask);
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

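/*
 * Walk the topology list returned by SYSIB 15.1.x entry by entry:
 * container entries with nesting level 2 start a new book, level 1 a new
 * socket, and level 0 entries describe the CPUs of the current socket.
 */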
static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, socket, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

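/*
 * z10 variant: only two nesting levels are reported, so level 1 entries
 * are treated as books and every CPU entry gets its own socket
 * (one_socket_per_cpu == 1).
 */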
static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	spin_lock_irq(&topology_lock);
	get_cpu_id(&cpu_id);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:	/* z10 EC */
	case 0x2098:	/* z10 BC */
		__tl_to_masks_z10(info);
		break;
	default:
		__tl_to_masks_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

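/*
 * Issue the Perform Topology Function instruction (opcode 0xb9a2) with the
 * given function code and return its condition code.
 */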
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_masks(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			cpu_topology[cpu].core_id = cpu;
			cpu_topology[cpu].socket_id = cpu;
			cpu_topology[cpu].book_id = cpu;
		}
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

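/*
 * Fetch the topology information block: use SYSIB 15.1.3 if the machine
 * supports at least three nesting levels, otherwise fall back to 15.1.2.
 */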
void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_masks();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_masks(info);
	update_cpu_masks();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

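/*
 * While a topology change is expected (topology_poll > 0) the timer fires
 * every 100ms; otherwise the topology is only rechecked once per minute.
 */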
static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

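/*
 * Preallocate one mask_info per possible container at the given nesting
 * level; nr_masks is the product of the relevant magnitude (mag[]) fields
 * reported by the machine.
 */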
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
}

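/*
 * The "dispatching" sysfs attribute selects horizontal (0) or vertical (1)
 * CPU polarization management; the per-cpu "polarization" attribute
 * reports the current polarization of each CPU.
 */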
static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static int __init topology_init(void)
{
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	set_topology_timer();
out:
	update_cpu_masks();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);