/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <asm/cputime.h>
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
#endif

static spinlock_t cpufreq_stats_lock;

struct cpufreq_stats {
        unsigned int cpu;
        unsigned int total_trans;
        unsigned long long last_time;
        unsigned int max_state;
        unsigned int state_num;
        unsigned int last_index;
        u64 *time_in_state;
        unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        unsigned int *trans_table;
#endif
};

struct all_cpufreq_stats {
        unsigned int state_num;
        cputime64_t *time_in_state;
        unsigned int *freq_table;
};

struct cpufreq_power_stats {
        unsigned int state_num;
        unsigned int *curr;
        unsigned int *freq_table;
};

struct all_freq_table {
        unsigned int *freq_table;
        unsigned int table_size;
};

static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

struct cpufreq_stats_attribute {
        struct attribute attr;
        ssize_t(*show) (struct cpufreq_stats *, char *);
};

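/*
 * Account the time spent at the current frequency since the last update
 * into both the per-cpu stats table and the aggregated all_cpufreq_stats
 * entry, then reset the reference timestamp.
 */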
static int cpufreq_stats_update(unsigned int cpu)
{
        struct cpufreq_stats *stat;
        struct all_cpufreq_stats *all_stat;
        unsigned long long cur_time;

        cur_time = get_jiffies_64();
        spin_lock(&cpufreq_stats_lock);
        stat = per_cpu(cpufreq_stats_table, cpu);
        all_stat = per_cpu(all_cpufreq_stats, cpu);
        if (!stat) {
                spin_unlock(&cpufreq_stats_lock);
                return 0;
        }
        if (stat->time_in_state) {
                stat->time_in_state[stat->last_index] +=
                        cur_time - stat->last_time;
                if (all_stat)
                        all_stat->time_in_state[stat->last_index] +=
                                        cur_time - stat->last_time;
        }
        stat->last_time = cur_time;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
}

static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        return sprintf(buf, "%d\n",
                        per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}

static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
        ssize_t len = 0;
        int i;
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        cpufreq_stats_update(stat->cpu);
        for (i = 0; i < stat->state_num; i++) {
                len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
                        (unsigned long long)
                        jiffies_64_to_clock_t(stat->time_in_state[i]));
        }
        return len;
}

static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
                unsigned int freq)
{
        int i;
        if (!all_stat)
                return -1;
        for (i = 0; i < all_stat->state_num; i++) {
                if (all_stat->freq_table[i] == freq)
                        return i;
        }
        return -1;
}

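/*
 * Attribute the cost of running @task for @cputime to its cpu_power
 * accumulator: the per-frequency "current" value for the CPU's present
 * frequency multiplied by the run time in microseconds.
 */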
void acct_update_power(struct task_struct *task, cputime_t cputime)
{
        struct cpufreq_power_stats *powerstats;
        struct cpufreq_stats *stats;
        unsigned int cpu_num, curr;

        if (!task)
                return;
        cpu_num = task_cpu(task);
        powerstats = per_cpu(cpufreq_power_stats, cpu_num);
        stats = per_cpu(cpufreq_stats_table, cpu_num);
        if (!powerstats || !stats)
                return;

        curr = powerstats->curr[stats->last_index];
        if (task->cpu_power != ULLONG_MAX)
                task->cpu_power += curr * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);

static ssize_t show_current_in_state(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        ssize_t len = 0;
        unsigned int i, cpu;
        struct cpufreq_power_stats *powerstats;

        spin_lock(&cpufreq_stats_lock);
        for_each_possible_cpu(cpu) {
                powerstats = per_cpu(cpufreq_power_stats, cpu);
                if (!powerstats)
                        continue;
                len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
                for (i = 0; i < powerstats->state_num; i++)
                        len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "%d=%d ", powerstats->freq_table[i],
                                        powerstats->curr[i]);
                len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
        }
        spin_unlock(&cpufreq_stats_lock);
        return len;
}

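/*
 * The global "all_time_in_state" attribute: one row per frequency in the
 * merged frequency table, one column per possible CPU, with "N/A" where a
 * CPU does not support that frequency.
 */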
static ssize_t show_all_time_in_state(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        ssize_t len = 0;
        unsigned int i, cpu, freq, index;
        struct all_cpufreq_stats *all_stat;
        struct cpufreq_policy *policy;

        len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
        for_each_possible_cpu(cpu) {
                len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
                if (cpu_online(cpu))
                        cpufreq_stats_update(cpu);
        }

        if (!all_freq_table)
                goto out;
        for (i = 0; i < all_freq_table->table_size; i++) {
                freq = all_freq_table->freq_table[i];
                len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
                for_each_possible_cpu(cpu) {
                        policy = cpufreq_cpu_get(cpu);
                        if (policy == NULL)
                                continue;
                        all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
                        index = get_index_all_cpufreq_stat(all_stat, freq);
                        if (index != -1) {
                                len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "%llu\t\t", (unsigned long long)
                                        cputime64_to_clock_t(all_stat->time_in_state[index]));
                        } else {
                                len += scnprintf(buf + len, PAGE_SIZE - len,
                                                "N/A\t\t");
                        }
                        cpufreq_cpu_put(policy);
                }
        }

out:
        len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
        return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
        ssize_t len = 0;
        int i, j;

        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
        if (!stat)
                return 0;
        cpufreq_stats_update(stat->cpu);
        len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
        len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
        for (i = 0; i < stat->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                stat->freq_table[i]);
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;

        len += snprintf(buf + len, PAGE_SIZE - len, "\n");

        for (i = 0; i < stat->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;

                len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
                                stat->freq_table[i]);

                for (j = 0; j < stat->state_num; j++) {
                        if (len >= PAGE_SIZE)
                                break;
                        len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                        stat->trans_table[i*stat->max_state+j]);
                }
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "\n");
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;
        return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
        &total_trans.attr,
        &time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        &trans_table.attr,
#endif
        NULL
};
static struct attribute_group stats_attr_group = {
        .attrs = default_attrs,
        .name = "stats"
};

static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
                0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
                0444, show_current_in_state, NULL);

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
        int index;
        for (index = 0; index < stat->max_state; index++)
                if (stat->freq_table[index] == freq)
                        return index;
        return -1;
}

/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

        if (stat) {
                pr_debug("%s: Free stat table\n", __func__);
                kfree(stat->time_in_state);
                kfree(stat);
                per_cpu(cpufreq_stats_table, cpu) = NULL;
        }
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                return;

        if (!cpufreq_frequency_get_table(cpu))
                goto put_ref;

        if (!policy_is_shared(policy)) {
                pr_debug("%s: Free sysfs stat\n", __func__);
                sysfs_remove_group(&policy->kobj, &stats_attr_group);
        }

put_ref:
        cpufreq_cpu_put(policy);
}

static void cpufreq_allstats_free(void)
{
        int cpu;
        struct all_cpufreq_stats *all_stat;

        sysfs_remove_file(cpufreq_global_kobject,
                                                &_attr_all_time_in_state.attr);

        for_each_possible_cpu(cpu) {
                all_stat = per_cpu(all_cpufreq_stats, cpu);
                if (!all_stat)
                        continue;
                kfree(all_stat->time_in_state);
                kfree(all_stat);
                per_cpu(all_cpufreq_stats, cpu) = NULL;
        }
        if (all_freq_table) {
                kfree(all_freq_table->freq_table);
                kfree(all_freq_table);
                all_freq_table = NULL;
        }
}

static void cpufreq_powerstats_free(void)
{
        int cpu;
        struct cpufreq_power_stats *powerstats;

        sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);

        for_each_possible_cpu(cpu) {
                powerstats = per_cpu(cpufreq_power_stats, cpu);
                if (!powerstats)
                        continue;
                kfree(powerstats->curr);
                kfree(powerstats);
                per_cpu(cpufreq_power_stats, cpu) = NULL;
        }
}

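/*
 * Allocate and register the per-policy stats: a single buffer holds
 * time_in_state, the frequency table and (optionally) the transition
 * table, and the "stats" sysfs group is created under the policy kobject.
 */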
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table, int count)
{
        unsigned int i, j, ret = 0;
        struct cpufreq_stats *stat;
        struct cpufreq_policy *data;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
        if (per_cpu(cpufreq_stats_table, cpu))
                return -EBUSY;
        stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
        if (!stat)
                return -ENOMEM;

        data = cpufreq_cpu_get(cpu);
        if (data == NULL) {
                ret = -EINVAL;
                goto error_get_fail;
        }

        ret = sysfs_create_group(&data->kobj, &stats_attr_group);
        if (ret)
                goto error_out;

        stat->cpu = cpu;
        per_cpu(cpufreq_stats_table, cpu) = stat;

        alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        alloc_size += count * count * sizeof(int);
#endif
        stat->max_state = count;
        stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!stat->time_in_state) {
                ret = -ENOMEM;
                goto error_out;
        }
        stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        stat->trans_table = stat->freq_table + count;
#endif
        j = 0;
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                if (freq_table_get_index(stat, freq) == -1)
                        stat->freq_table[j++] = freq;
        }
        stat->state_num = j;
        spin_lock(&cpufreq_stats_lock);
        stat->last_time = get_jiffies_64();
        stat->last_index = freq_table_get_index(stat, policy->cur);
        spin_unlock(&cpufreq_stats_lock);
        cpufreq_cpu_put(data);
        return 0;
error_out:
        cpufreq_cpu_put(data);
error_get_fail:
        kfree(stat);
        per_cpu(cpufreq_stats_table, cpu) = NULL;
        return ret;
}

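/* Move the stats table reference when the CPU managing a policy changes. */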
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
                        policy->last_cpu);

        pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
                        policy->cpu, policy->last_cpu);
        per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
                        policy->last_cpu);
        per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
        stat->cpu = policy->cpu;
}

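/*
 * Build the per-cpu power stats table: valid frequencies from the driver's
 * frequency table paired with per-frequency current values read from the
 * "current" property of the CPU's device tree node.
 */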
static void cpufreq_powerstats_create(unsigned int cpu,
                struct cpufreq_frequency_table *table, int count)
{
        unsigned int alloc_size, i = 0, j = 0, ret = 0;
        struct cpufreq_power_stats *powerstats;
        struct device_node *cpu_node;
        char device_path[16];

        powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
                        GFP_KERNEL);
        if (!powerstats)
                return;

        /*
         * Allocate memory for the per-cpu freq table as well as the
         * per-frequency current values.
         */
        alloc_size = count * sizeof(unsigned int) +
                count * sizeof(unsigned int);
        powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
        if (!powerstats->curr) {
                kfree(powerstats);
                return;
        }
        powerstats->freq_table = powerstats->curr + count;

        spin_lock(&cpufreq_stats_lock);
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
                unsigned int freq = table[i].frequency;

                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                powerstats->freq_table[j++] = freq;
        }
        powerstats->state_num = j;

        snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
        cpu_node = of_find_node_by_path(device_path);
        if (cpu_node) {
                ret = of_property_read_u32_array(cpu_node, "current",
                                powerstats->curr, count);
                if (ret) {
                        kfree(powerstats->curr);
                        kfree(powerstats);
                        powerstats = NULL;
                }
        }
        per_cpu(cpufreq_power_stats, cpu) = powerstats;
        spin_unlock(&cpufreq_stats_lock);
}

static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
        unsigned int lhs = *(const unsigned int *)(lhs_ptr);
        unsigned int rhs = *(const unsigned int *)(rhs_ptr);
        if (lhs < rhs)
                return -1;
        if (lhs > rhs)
                return 1;
        return 0;
}

static bool check_all_freq_table(unsigned int freq)
{
        int i;
        for (i = 0; i < all_freq_table->table_size; i++) {
                if (freq == all_freq_table->freq_table[i])
                        return true;
        }
        return false;
}

static void create_all_freq_table(void)
{
        all_freq_table = kzalloc(sizeof(struct all_freq_table),
                        GFP_KERNEL);
        if (!all_freq_table)
                pr_warn("could not allocate memory for all_freq_table\n");
        return;
}

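/*
 * Grow the merged frequency table by one entry and append @freq.
 * krealloc() returns NULL on failure and leaves the old allocation intact,
 * so the result is checked before the table pointer is updated.
 */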
static void add_all_freq_table(unsigned int freq)
{
        unsigned int size;
        unsigned int *new_table;

        size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
        new_table = krealloc(all_freq_table->freq_table, size, GFP_ATOMIC);
        if (!new_table) {
                pr_warn("Could not reallocate memory for freq_table\n");
                return;
        }
        all_freq_table->freq_table = new_table;
        all_freq_table->freq_table[all_freq_table->table_size++] = freq;
}

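/*
 * Create the per-cpu aggregate time_in_state table and merge this CPU's
 * valid frequencies into the global all_freq_table, keeping it sorted.
 */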
static void cpufreq_allstats_create(unsigned int cpu,
                struct cpufreq_frequency_table *table, int count)
{
        int i, j = 0;
        unsigned int alloc_size;
        struct all_cpufreq_stats *all_stat;
        bool sort_needed = false;

        all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
                        GFP_KERNEL);
        if (!all_stat) {
                pr_warn("Cannot allocate memory for cpufreq stats\n");
                return;
        }

        /* Allocate memory for freq table per cpu as well as clockticks per freq */
        alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
        all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!all_stat->time_in_state) {
                pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
                kfree(all_stat);
                all_stat = NULL;
                return;
        }
        all_stat->freq_table = (unsigned int *)
                (all_stat->time_in_state + count);

        spin_lock(&cpufreq_stats_lock);
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                all_stat->freq_table[j++] = freq;
                if (all_freq_table && !check_all_freq_table(freq)) {
                        add_all_freq_table(freq);
                        sort_needed = true;
                }
        }
        if (sort_needed)
                sort(all_freq_table->freq_table, all_freq_table->table_size,
                                sizeof(unsigned int), &compare_for_sort, NULL);
        all_stat->state_num = j;
        per_cpu(all_cpufreq_stats, cpu) = all_stat;
        spin_unlock(&cpufreq_stats_lock);
}

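/*
 * Policy notifier: on CPUFREQ_NOTIFY, count the valid frequencies and
 * create the stats, allstats and powerstats tables; on
 * CPUFREQ_UPDATE_POLICY_CPU, just migrate the existing stats table.
 */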
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        int ret, count = 0, i;
        struct cpufreq_policy *policy = data;
        struct cpufreq_frequency_table *table;
        unsigned int cpu_num, cpu = policy->cpu;

        if (val == CPUFREQ_UPDATE_POLICY_CPU) {
                cpufreq_stats_update_policy_cpu(policy);
                return 0;
        }

        if (val != CPUFREQ_NOTIFY)
                return 0;
        table = cpufreq_frequency_get_table(cpu);
        if (!table)
                return 0;

        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;

                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
                count++;
        }

        if (!per_cpu(all_cpufreq_stats, cpu))
                cpufreq_allstats_create(cpu, table, count);

        for_each_possible_cpu(cpu_num) {
                if (!per_cpu(cpufreq_power_stats, cpu_num))
                        cpufreq_powerstats_create(cpu_num, table, count);
        }

        ret = cpufreq_stats_create_table(policy, table, count);
        if (ret)
                return ret;
        return 0;
}

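/*
 * Transition notifier: on CPUFREQ_POSTCHANGE, close out the time spent at
 * the old frequency and bump the transition counters for the new one.
 */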
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_stats *stat;
        int old_index, new_index;

        if (val != CPUFREQ_POSTCHANGE)
                return 0;

        stat = per_cpu(cpufreq_stats_table, freq->cpu);
        if (!stat)
                return 0;

        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);

        /* We can't do stat->time_in_state[-1]= .. */
        if (old_index == -1 || new_index == -1)
                return 0;

        cpufreq_stats_update(freq->cpu);

        if (old_index == new_index)
                return 0;

        spin_lock(&cpufreq_stats_lock);
        stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
        stat->total_trans++;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
}

static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *table;
        int i, count, cpu_num, ret = -ENODEV;

        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return -ENODEV;

        table = cpufreq_frequency_get_table(cpu);
        if (!table)
                goto out;

        count = 0;
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;

                if (freq != CPUFREQ_ENTRY_INVALID)
                        count++;
        }

        if (!per_cpu(all_cpufreq_stats, cpu))
                cpufreq_allstats_create(cpu, table, count);

        for_each_possible_cpu(cpu_num) {
                if (!per_cpu(cpufreq_power_stats, cpu_num))
                        cpufreq_powerstats_create(cpu_num, table, count);
        }

        ret = cpufreq_stats_create_table(policy, table, count);

out:
        cpufreq_cpu_put(policy);
        return ret;
}

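/*
 * Hotplug callback: rebuild the stats when a CPU comes online or a
 * down fails, drop the sysfs group before the policy goes away, and
 * free the stats table once the CPU is dead.
 */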
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cpufreq_update_policy(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cpufreq_stats_free_sysfs(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                cpufreq_stats_create_table_cpu(cpu);
                break;
        }
        return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
        .notifier_call = cpufreq_stat_cpu_callback,
        .priority = 1,
};

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
        .notifier_call = cpufreq_stat_notifier_trans
};

static int cpufreq_stats_setup(void)
{
        int ret;
        unsigned int cpu;

        spin_lock_init(&cpufreq_stats_lock);
        ret = cpufreq_register_notifier(&notifier_policy_block,
                                CPUFREQ_POLICY_NOTIFIER);
        if (ret)
                return ret;

        register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu)
                cpufreq_update_policy(cpu);

        ret = cpufreq_register_notifier(&notifier_trans_block,
                                CPUFREQ_TRANSITION_NOTIFIER);
        if (ret) {
                cpufreq_unregister_notifier(&notifier_policy_block,
                                CPUFREQ_POLICY_NOTIFIER);
                unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
                for_each_online_cpu(cpu)
                        cpufreq_stats_free_table(cpu);
                return ret;
        }

        create_all_freq_table();
        ret = sysfs_create_file(cpufreq_global_kobject,
                        &_attr_all_time_in_state.attr);
        if (ret)
                pr_warn("Cannot create sysfs file for cpufreq stats\n");

        ret = sysfs_create_file(cpufreq_global_kobject,
                        &_attr_current_in_state.attr);
        if (ret)
                pr_warn("Cannot create sysfs file for cpufreq current stats\n");

        return 0;
}

static void cpufreq_stats_cleanup(void)
{
        unsigned int cpu;

        cpufreq_unregister_notifier(&notifier_policy_block,
                        CPUFREQ_POLICY_NOTIFIER);
        cpufreq_unregister_notifier(&notifier_trans_block,
                        CPUFREQ_TRANSITION_NOTIFIER);
        unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu) {
                cpufreq_stats_free_table(cpu);
                cpufreq_stats_free_sysfs(cpu);
        }
        cpufreq_allstats_free();
        cpufreq_powerstats_free();
}

#ifdef CONFIG_BL_SWITCHER
static int cpufreq_stats_switcher_notifier(struct notifier_block *nfb,
                                        unsigned long action, void *_arg)
{
        switch (action) {
        case BL_NOTIFY_PRE_ENABLE:
        case BL_NOTIFY_PRE_DISABLE:
                cpufreq_stats_cleanup();
                break;

        case BL_NOTIFY_POST_ENABLE:
        case BL_NOTIFY_POST_DISABLE:
                cpufreq_stats_setup();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static struct notifier_block switcher_notifier = {
        .notifier_call = cpufreq_stats_switcher_notifier,
};
#endif

static int __init cpufreq_stats_init(void)
{
        int ret;
        spin_lock_init(&cpufreq_stats_lock);

        ret = cpufreq_stats_setup();
#ifdef CONFIG_BL_SWITCHER
        if (!ret)
                bL_switcher_register_notifier(&switcher_notifier);
#endif
        return ret;
}

static void __exit cpufreq_stats_exit(void)
{
#ifdef CONFIG_BL_SWITCHER
        bL_switcher_unregister_notifier(&switcher_notifier);
#endif
        cpufreq_stats_cleanup();
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
                                "through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);