arch/s390/kernel/cache.c
/*
 * Extract CPU cache information and expose it via sysfs.
 *
 *    Copyright IBM Corp. 2012
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>

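/*
 * One struct cache describes a single detected cache level/type; all
 * entries are kept on the global cache_list.  For sysfs, each CPU gets
 * a "cache" directory (struct cache_dir) containing one "index%d"
 * subdirectory (struct cache_index_dir) per private cache.
 */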
struct cache {
        unsigned long size;
        unsigned int line_size;
        unsigned int associativity;
        unsigned int nr_sets;
        unsigned int level   : 3;
        unsigned int type    : 2;
        unsigned int private : 1;
        struct list_head list;
};

struct cache_dir {
        struct kobject *kobj;
        struct cache_index_dir *index;
};

struct cache_index_dir {
        struct kobject kobj;
        int cpu;
        struct cache *cache;
        struct cache_index_dir *next;
};

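/*
 * Encodings used with the ecag() helper below: the scope and type
 * reported per cache level, the attribute to extract (ai), and the
 * type indication (ti) selecting the data, instruction or unified
 * cache of a level.
 */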
enum {
        CACHE_SCOPE_NOTEXISTS,
        CACHE_SCOPE_PRIVATE,
        CACHE_SCOPE_SHARED,
        CACHE_SCOPE_RESERVED,
};

enum {
        CACHE_TYPE_SEPARATE,
        CACHE_TYPE_DATA,
        CACHE_TYPE_INSTRUCTION,
        CACHE_TYPE_UNIFIED,
};

enum {
        EXTRACT_TOPOLOGY,
        EXTRACT_LINE_SIZE,
        EXTRACT_SIZE,
        EXTRACT_ASSOCIATIVITY,
};

enum {
        CACHE_TI_UNIFIED = 0,
        CACHE_TI_DATA = 0,
        CACHE_TI_INSTRUCTION,
};

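/*
 * EXTRACT_TOPOLOGY yields one topology byte per cache level; struct
 * cache_info matches the layout of such a byte, and CACHE_MAX_LEVEL of
 * them fit into the 8-byte result (union cache_topology).
 */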
struct cache_info {
        unsigned char       : 4;
        unsigned char scope : 2;
        unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
        struct cache_info ci[CACHE_MAX_LEVEL];
        unsigned long long raw;
};

static const char * const cache_type_string[] = {
        "Data",
        "Instruction",
        "Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

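/*
 * Dump one line of information per detected cache; used by the arch
 * code when generating /proc/cpuinfo output.
 */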
void show_cacheinfo(struct seq_file *m)
{
        struct cache *cache;
        int index = 0;

        list_for_each_entry(cache, &cache_list, list) {
                seq_printf(m, "cache%-11d: ", index);
                seq_printf(m, "level=%d ", cache->level);
                seq_printf(m, "type=%s ", cache_type_string[cache->type]);
                seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
                seq_printf(m, "size=%luK ", cache->size >> 10);
                seq_printf(m, "line_size=%u ", cache->line_size);
                seq_printf(m, "associativity=%u", cache->associativity);
                seq_puts(m, "\n");
                index++;
        }
}

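/*
 * Wrapper around the ECAG instruction, which is used here to query the
 * cache topology summary and per-level cache attributes.  The opcode is
 * emitted via .insn, so no assembler support for the mnemonic is needed.
 * ai selects which attribute to extract, li the cache level and ti the
 * data/instruction/unified cache of that level.
 */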
static inline unsigned long ecag(int ai, int li, int ti)
{
        unsigned long cmd, val;

        cmd = ai << 4 | li << 1 | ti;
        asm volatile(".insn     rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
                     : "=d" (val) : "a" (cmd));
        return val;
}

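/*
 * Query size, line size and associativity of one cache level via ecag()
 * and append a corresponding struct cache to cache_list.  level is the
 * 0-based hardware level; cache->level is stored 1-based and cache->type
 * is converted into an index into cache_type_string[].
 */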
static int __init cache_add(int level, int private, int type)
{
        struct cache *cache;
        int ti;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (!cache)
                return -ENOMEM;
        if (type == CACHE_TYPE_INSTRUCTION)
                ti = CACHE_TI_INSTRUCTION;
        else
                ti = CACHE_TI_UNIFIED;
        cache->size = ecag(EXTRACT_SIZE, level, ti);
        cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
        cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
        cache->nr_sets = cache->size / cache->associativity;
        cache->nr_sets /= cache->line_size;
        cache->private = private;
        cache->level = level + 1;
        cache->type = type - 1;
        list_add_tail(&cache->list, &cache_list);
        return 0;
}

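/*
 * Read the cache topology summary once at boot and build the global
 * cache_list.  Scanning stops at the first level that does not exist
 * (or is reserved); a "separate" level contributes both a data and an
 * instruction cache entry.
 */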
static void __init cache_build_info(void)
{
        struct cache *cache, *next;
        union cache_topology ct;
        int level, private, rc;

        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (level = 0; level < CACHE_MAX_LEVEL; level++) {
                switch (ct.ci[level].scope) {
                case CACHE_SCOPE_NOTEXISTS:
                case CACHE_SCOPE_RESERVED:
                        return;
                case CACHE_SCOPE_SHARED:
                        private = 0;
                        break;
                case CACHE_SCOPE_PRIVATE:
                        private = 1;
                        break;
                }
                if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
                        rc  = cache_add(level, private, CACHE_TYPE_DATA);
                        rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
                } else {
                        rc = cache_add(level, private, ct.ci[level].type);
                }
                if (rc)
                        goto error;
        }
        return;
error:
        list_for_each_entry_safe(cache, next, &cache_list, list) {
                list_del(&cache->list);
                kfree(cache);
        }
}

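/*
 * Create the per-CPU "cache" kobject below the CPU device and remember
 * it in cache_dir_cpu[] so it can be torn down on hot unplug.
 */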
static struct cache_dir *cache_create_cache_dir(int cpu)
{
        struct cache_dir *cache_dir;
        struct kobject *kobj = NULL;
        struct device *dev;

        dev = get_cpu_device(cpu);
        if (!dev)
                goto out;
        kobj = kobject_create_and_add("cache", &dev->kobj);
        if (!kobj)
                goto out;
        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
        if (!cache_dir)
                goto out;
        cache_dir->kobj = kobj;
        cache_dir_cpu[cpu] = cache_dir;
        return cache_dir;
out:
        kobject_put(kobj);
        return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
        return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(kobj);
        kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct kobj_attribute *kobj_attr;

        kobj_attr = container_of(attr, struct kobj_attribute, attr);
        return kobj_attr->show(kobj, kobj_attr, buf);
}

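/*
 * DEFINE_CACHE_ATTR generates a read-only sysfs show function plus the
 * matching kobj_attribute for one attribute of an index directory.
 */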
#define DEFINE_CACHE_ATTR(_name, _format, _value)                       \
static ssize_t cache_##_name##_show(struct kobject *kobj,               \
                                    struct kobj_attribute *attr,        \
                                    char *buf)                          \
{                                                                       \
        struct cache_index_dir *index;                                  \
                                                                        \
        index = kobj_to_cache_index_dir(kobj);                          \
        return sprintf(buf, _format, _value);                           \
}                                                                       \
static struct kobj_attribute cache_##_name##_attr =                     \
        __ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

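/*
 * Index directories are only created for private caches (see
 * cache_add_cpu), so shared_cpu_map/shared_cpu_list always contain just
 * the owning CPU.
 */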
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
        struct cache_index_dir *index;
        int len;

        index = kobj_to_cache_index_dir(kobj);
        len = type ?
                cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
                cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
        len += sprintf(&buf[len], "\n");
        return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
        __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
        &cache_type_attr.attr,
        &cache_size_attr.attr,
        &cache_number_of_sets_attr.attr,
        &cache_ways_of_associativity_attr.attr,
        &cache_level_attr.attr,
        &cache_coherency_line_size_attr.attr,
        &cache_shared_cpu_map_attr.attr,
        &cache_shared_cpu_list_attr.attr,
        NULL,
};

static const struct sysfs_ops cache_index_ops = {
        .show = cache_index_show,
};

static struct kobj_type cache_index_type = {
        .sysfs_ops = &cache_index_ops,
        .release = cache_index_release,
        .default_attrs = cache_index_default_attrs,
};

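/*
 * Create one "index%d" directory below the per-CPU cache directory and
 * link it into the cache_dir's list; the kobject is freed through
 * cache_index_release() when its last reference is dropped.
 */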
static int cache_create_index_dir(struct cache_dir *cache_dir,
                                  struct cache *cache, int index, int cpu)
{
        struct cache_index_dir *index_dir;
        int rc;

        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
        if (!index_dir)
                return -ENOMEM;
        index_dir->cache = cache;
        index_dir->cpu = cpu;
        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
                                  cache_dir->kobj, "index%d", index);
        if (rc)
                goto out;
        index_dir->next = cache_dir->index;
        cache_dir->index = index_dir;
        return 0;
out:
        kfree(index_dir);
        return rc;
}

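/*
 * Create the sysfs directories for all private caches of a CPU.  The
 * walk stops at the first shared cache, which assumes that private
 * levels precede shared ones on cache_list (entries are added in level
 * order by cache_build_info()).
 */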
static int cache_add_cpu(int cpu)
{
        struct cache_dir *cache_dir;
        struct cache *cache;
        int rc, index = 0;

        if (list_empty(&cache_list))
                return 0;
        cache_dir = cache_create_cache_dir(cpu);
        if (!cache_dir)
                return -ENOMEM;
        list_for_each_entry(cache, &cache_list, list) {
                if (!cache->private)
                        break;
                rc = cache_create_index_dir(cache_dir, cache, index, cpu);
                if (rc)
                        return rc;
                index++;
        }
        return 0;
}

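/*
 * Tear down the sysfs directories of a CPU: drop all index kobjects,
 * then the "cache" directory itself.
 */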
static void cache_remove_cpu(int cpu)
{
        struct cache_index_dir *index, *next;
        struct cache_dir *cache_dir;

        cache_dir = cache_dir_cpu[cpu];
        if (!cache_dir)
                return;
        index = cache_dir->index;
        while (index) {
                next = index->next;
                kobject_put(&index->kobj);
                index = next;
        }
        kobject_put(cache_dir->kobj);
        kfree(cache_dir);
        cache_dir_cpu[cpu] = NULL;
}

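/*
 * CPU hotplug callback: add the directories when a CPU comes online and
 * remove them again when it dies (or when adding them failed halfway).
 */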
static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
                         void *hcpu)
{
        int cpu = (long)hcpu;
        int rc = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                rc = cache_add_cpu(cpu);
                if (rc)
                        cache_remove_cpu(cpu);
                break;
        case CPU_DEAD:
                cache_remove_cpu(cpu);
                break;
        }
        return rc ? NOTIFY_BAD : NOTIFY_OK;
}

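/*
 * Boot-time setup: bail out if facility bit 34, needed for the ECAG
 * instruction used above, is not installed; otherwise detect the cache
 * topology once, populate the already online CPUs and register the
 * hotplug notifier.
 */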
static int __init cache_init(void)
{
        int cpu;

        if (!test_facility(34))
                return 0;
        cache_build_info();
        for_each_online_cpu(cpu)
                cache_add_cpu(cpu);
        hotcpu_notifier(cache_hotplug, 0);
        return 0;
}
device_initcall(cache_init);