cpu: add generic support for CPU feature based module autoloading
[firefly-linux-kernel-4.4.55.git] / drivers / base / cpu.c
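
This file provides the generic side of that support: print_cpu_modalias() builds a modalias string from the features the running CPU advertises and exposes it as /sys/devices/system/cpu/modalias, while cpu_uevent() reports the same string as MODALIAS in uevents so udev/modprobe can autoload matching modules. Architectures that select CONFIG_GENERIC_CPU_AUTOPROBE get the generic implementation; architectures that provide their own arch_print_cpu_modalias() keep using it via the #else branch below.

A minimal sketch of a consumer module, assuming an architecture that defines a CRC32 CPU feature (as arm64 does); the foo_accel_* names are hypothetical:

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init foo_accel_init(void)
{
        /* register the CRC32-accelerated implementation here */
        return 0;
}

static void __exit foo_accel_exit(void)
{
}

/*
 * module_cpu_feature_match() generates a cpu feature device table entry,
 * so the module is pulled in only on CPUs that advertise CRC32, either
 * via the MODALIAS uevent or by a modprobe of the sysfs modalias string.
 */
module_cpu_feature_match(CRC32, foo_accel_init);
module_exit(foo_accel_exit);

MODULE_LICENSE("GPL");

For coldplug, userspace can feed the contents of /sys/devices/system/cpu/modalias to modprobe, e.g. "modprobe $(cat /sys/devices/system/cpu/modalias)".
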
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/cpufeature.h>

#include "base.h"

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
                        unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;
        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

static ssize_t show_online(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = cpu->dev.id;
        int from_nid, to_nid;
        ssize_t ret;

        cpu_hotplug_driver_lock();
        switch (buf[0]) {
        case '0':
                ret = cpu_down(cpuid);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                break;
        case '1':
                from_nid = cpu_to_node(cpuid);
                ret = cpu_up(cpuid);

                /*
                 * When hot adding memory to a memoryless node and enabling
                 * a cpu on that node, the node number of the cpu may change
                 * internally.
                 */
                to_nid = cpu_to_node(cpuid);
                if (from_nid != to_nid)
                        change_cpu_under_node(cpu, from_nid, to_nid);

                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                break;
        default:
                ret = -EINVAL;
        }
        cpu_hotplug_driver_unlock();

        if (ret >= 0)
                ret = count;
        return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
        device_create_file(&cpu->dev, &dev_attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_remove_file(&cpu->dev, &dev_attr_online);

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

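/*
 * Expose the physical address of this cpu's crash notes buffer; user space
 * kexec tooling reads it when preparing the crash dump ELF headers.
 */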
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * We might be reading another cpu's data, depending on which cpu the
         * reading thread has been scheduled on.  But cpu data (memory) is
         * allocated once during boot and does not change thereafter, so this
         * operation is safe and no locking is required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        ssize_t rc;

        rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
        return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects says, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

#ifdef CONFIG_HAVE_CPU_AUTOPROBE
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
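/*
 * Generic modalias: "cpu:type:<type>:feature:,NNNN,NNNN,...", where <type>
 * comes from CPU_FEATURE_TYPEFMT/CPU_FEATURE_TYPEVAL and every feature the
 * CPU advertises is appended as a four-digit hex number.
 */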
static ssize_t print_cpu_modalias(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        ssize_t n;
        u32 i;

        n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
                    CPU_FEATURE_TYPEVAL);

        for (i = 0; i < MAX_CPU_FEATURES; i++)
                if (cpu_have_feature(i)) {
                        if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
                                WARN(1, "CPU features overflow page\n");
                                break;
                        }
                        n += sprintf(&buf[n], ",%04X", i);
                }
        buf[n++] = '\n';
        return n;
}
#else
#define print_cpu_modalias      arch_print_cpu_modalias
#endif

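/*
 * Report the CPU modalias as a MODALIAS uevent variable so that udev can
 * autoload modules whose cpu feature device table matches it.
 */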
static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (buf) {
                print_cpu_modalias(NULL, NULL, buf);
                add_uevent_var(env, "MODALIAS=%s", buf);
                kfree(buf);
        }
        return 0;
}
#endif

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
#ifdef CONFIG_HAVE_CPU_AUTOPROBE
        cpu->dev.bus->uevent = cpu_uevent;
#endif
        error = device_register(&cpu->dev);
        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
        if (!error)
                error = device_create_file(&cpu->dev,
                                           &dev_attr_crash_notes_size);
#endif
        return error;
}

struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

#ifdef CONFIG_HAVE_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_HAVE_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
}