/*
 * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#ifdef CONFIG_ROCKCHIP_CPUQUIET
#include <linux/cpuquiet.h>
#include <linux/pm_qos.h>
#endif
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/system_misc.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/rockchip/common.h>
#include <dt-bindings/clock/rk_system_status.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "../../../drivers/clk/rockchip/clk-pd.h"

#define VERSION "1.0"
#define MAX_CLUSTERS 2
#define B_CLUSTER       0
#define L_CLUSTER       1

#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)

static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
#define CPU_LOW_FREQ    600000    /* kHz */
#define CCI_LOW_RATE    288000000 /* Hz */
#define CCI_HIGH_RATE   576000000 /* Hz */
/* With 0x00 (NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
static unsigned int no_cpufreq_access[MAX_CLUSTERS] = { 0 };
static unsigned int suspend_freq[MAX_CLUSTERS] = { 816 * 1000, 816 * 1000 };
static unsigned int suspend_volt = 1100000;
static unsigned int low_battery_freq[MAX_CLUSTERS] = { 600 * 1000,
        600 * 1000 };
static unsigned int low_battery_capacity = 5;
static bool is_booting = true;
static DEFINE_MUTEX(cpufreq_mutex);
static struct dvfs_node *clk_cpu_dvfs_node[MAX_CLUSTERS];
static struct dvfs_node *clk_gpu_dvfs_node;
static struct dvfs_node *clk_ddr_dvfs_node;
static cpumask_var_t cluster_policy_mask[MAX_CLUSTERS];
static struct clk *aclk_cci;
static unsigned long cci_rate;
static unsigned int cpu_bl_freq[MAX_CLUSTERS];

#ifdef CONFIG_ROCKCHIP_CPUQUIET
static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
                                                    unsigned int cpu_freq);
static struct cpuquiet_governor rockchip_bl_balanced_governor;
#endif

/*******************************************************/
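/*
 * Map a CPU to its cluster index (B_CLUSTER or L_CLUSTER) via the
 * topology's physical package id; fall back to cluster 0 when no
 * topology information is available (id < 0).
 */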
static inline int cpu_to_cluster(int cpu)
{
        int id = topology_physical_package_id(cpu);

        if (id < 0)
                id = 0;
        return id;
}

static unsigned int rockchip_bl_cpufreq_get_rate(unsigned int cpu)
{
        u32 cur_cluster = cpu_to_cluster(cpu);

        if (clk_cpu_dvfs_node[cur_cluster])
                return clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;

        return 0;
}

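/*
 * True for governors that scale frequency dynamically, matched by the
 * first letter of the governor name (e.g. ondemand, interactive,
 * conservative, hotplug); performance, powersave and userspace do not
 * match.
 */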
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
{
        char c = 0;

        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
}

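/*
 * Return the highest frequency in the cluster's table that does not
 * exceed max_freq, or max_freq itself if no table entry qualifies.
 */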
static unsigned int get_freq_from_table(unsigned int max_freq,
                                        unsigned int cluster)
{
        unsigned int i;
        unsigned int target_freq = 0;

        for (i = 0; freq_table[cluster][i].frequency != CPUFREQ_TABLE_END;
             i++) {
                unsigned int freq = freq_table[cluster][i].frequency;

                if (freq <= max_freq && target_freq < freq)
                        target_freq = freq;
        }
        if (!target_freq)
                target_freq = max_freq;
        return target_freq;
}

static int rockchip_bl_cpufreq_notifier_policy(struct notifier_block *nb,
                                               unsigned long val,
                                               void *data)
{
        static unsigned int min_rate = 0, max_rate = -1;
        struct cpufreq_policy *policy = data;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (val != CPUFREQ_ADJUST)
                return 0;

        if (cpufreq_is_ondemand(policy)) {
                FREQ_DBG("queue work\n");
                dvfs_clk_enable_limit(clk_cpu_dvfs_node[cur_cluster],
                                      min_rate, max_rate);
        } else {
                FREQ_DBG("cancel work\n");
                dvfs_clk_get_limit(clk_cpu_dvfs_node[cur_cluster],
                                   &min_rate, &max_rate);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = rockchip_bl_cpufreq_notifier_policy
};

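/*
 * Scale the CCI interconnect clock with CPU frequency: raise aclk_cci
 * to CCI_HIGH_RATE before either cluster climbs above CPU_LOW_FREQ,
 * and drop back to CCI_LOW_RATE only once both clusters are at or
 * below it.
 */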
static int rockchip_bl_cpufreq_notifier_trans(struct notifier_block *nb,
                                              unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned int cluster = cpu_to_cluster(freq->cpu);
        int ret;

        cpu_bl_freq[cluster] = freq->new;

        switch (val) {
        case CPUFREQ_PRECHANGE:
                if (cpu_bl_freq[B_CLUSTER] > CPU_LOW_FREQ ||
                    cpu_bl_freq[L_CLUSTER] > CPU_LOW_FREQ) {
                        if (cci_rate != CCI_HIGH_RATE) {
                                ret = clk_set_rate(aclk_cci, CCI_HIGH_RATE);
                                if (ret)
                                        break;
                                pr_debug("ccirate %ld-->%d Hz\n",
                                         cci_rate, CCI_HIGH_RATE);
                                cci_rate = CCI_HIGH_RATE;
                        }
                }
                break;
        case CPUFREQ_POSTCHANGE:
                if (cpu_bl_freq[B_CLUSTER] <= CPU_LOW_FREQ &&
                    cpu_bl_freq[L_CLUSTER] <= CPU_LOW_FREQ) {
                        if (cci_rate != CCI_LOW_RATE) {
                                ret = clk_set_rate(aclk_cci, CCI_LOW_RATE);
                                if (ret)
                                        break;
                                pr_debug("ccirate %ld-->%d Hz\n",
                                         cci_rate, CCI_LOW_RATE);
                                cci_rate = CCI_LOW_RATE;
                        }
                }
                break;
        }

        return 0;
}

static struct notifier_block notifier_trans_block = {
        .notifier_call = rockchip_bl_cpufreq_notifier_trans,
};

static int rockchip_bl_cpufreq_verify(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!freq_table[cur_cluster])
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
}

static int clk_node_get_cluster_id(struct clk *clk)
{
        int i;

        for (i = 0; i < MAX_CLUSTERS; i++) {
                if (clk_cpu_dvfs_node[i]->clk == clk)
                        return i;
        }
        return 0;
}

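/*
 * set_rate callback registered with the dvfs layer: wrap the real
 * clk_set_rate() in CPUFREQ_PRECHANGE/POSTCHANGE notifications for the
 * policy that owns this cluster, re-reading the clock afterwards so
 * freqs.new reflects the rate actually achieved.
 */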
static int rockchip_bl_cpufreq_scale_rate_for_dvfs(struct clk *clk,
                                                   unsigned long rate)
{
        int ret;
        struct cpufreq_freqs freqs;
        struct cpufreq_policy *policy;
        u32 cur_cluster, cpu;

        cur_cluster = clk_node_get_cluster_id(clk);
        cpu = cpumask_first_and(cluster_policy_mask[cur_cluster],
                cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return -EINVAL;

        freqs.new = rate / 1000;
        freqs.old = clk_get_rate(clk) / 1000;

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

        FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

        ret = clk_set_rate(clk, rate);

        freqs.new = clk_get_rate(clk) / 1000;

#ifdef CONFIG_ROCKCHIP_CPUQUIET
        rockchip_bl_balanced_cpufreq_transition(cur_cluster, freqs.new);
#endif

        /* notifiers */
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        cpufreq_cpu_put(policy);
        return ret;
}

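/*
 * Bind a cluster to its dvfs clock: register the set_rate callback,
 * fetch the frequency/voltage table, and choose as suspend frequency
 * the operating point with the lowest voltage still >= suspend_volt.
 */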
static int cluster_cpus_freq_dvfs_init(u32 cluster_id, char *dvfs_name)
{
        int v = INT_MAX;
        int i;

        clk_cpu_dvfs_node[cluster_id] = clk_get_dvfs_node(dvfs_name);

        if (!clk_cpu_dvfs_node[cluster_id]) {
                FREQ_ERR("%s:cluster_id=%d,get dvfs err\n",
                         __func__, cluster_id);
                return -EINVAL;
        }
        dvfs_clk_register_set_rate_callback(
                clk_cpu_dvfs_node[cluster_id],
                rockchip_bl_cpufreq_scale_rate_for_dvfs);
        freq_table[cluster_id] =
                dvfs_get_freq_volt_table(clk_cpu_dvfs_node[cluster_id]);
        if (!freq_table[cluster_id]) {
                FREQ_ERR("No freq table for cluster %d\n", cluster_id);
                return -EINVAL;
        }

        for (i = 0; freq_table[cluster_id][i].frequency != CPUFREQ_TABLE_END;
             i++) {
                if (freq_table[cluster_id][i].index >= suspend_volt &&
                    v > freq_table[cluster_id][i].index) {
                        suspend_freq[cluster_id] =
                                freq_table[cluster_id][i].frequency;
                        v = freq_table[cluster_id][i].index;
                }
        }
        low_battery_freq[cluster_id] =
                get_freq_from_table(low_battery_freq[cluster_id], cluster_id);
        clk_enable_dvfs(clk_cpu_dvfs_node[cluster_id]);
        return 0;
}

static int rockchip_bl_cpufreq_init_cpu0(struct cpufreq_policy *policy)
{
        clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
        if (clk_gpu_dvfs_node)
                clk_enable_dvfs(clk_gpu_dvfs_node);

        clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
        if (clk_ddr_dvfs_node)
                clk_enable_dvfs(clk_ddr_dvfs_node);

        cluster_cpus_freq_dvfs_init(B_CLUSTER, "clk_core_b");
        cluster_cpus_freq_dvfs_init(L_CLUSTER, "clk_core_l");

        cpufreq_register_notifier(&notifier_policy_block,
                                  CPUFREQ_POLICY_NOTIFIER);

        aclk_cci = clk_get(NULL, "aclk_cci");
        if (!IS_ERR(aclk_cci)) {
                cci_rate = clk_get_rate(aclk_cci);
                if (clk_cpu_dvfs_node[L_CLUSTER])
                        cpu_bl_freq[L_CLUSTER] =
                        clk_get_rate(clk_cpu_dvfs_node[L_CLUSTER]->clk) / 1000;
                if (clk_cpu_dvfs_node[B_CLUSTER])
                        cpu_bl_freq[B_CLUSTER] =
                        clk_get_rate(clk_cpu_dvfs_node[B_CLUSTER]->clk) / 1000;
                cpufreq_register_notifier(&notifier_trans_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
        }

        pr_info("version " VERSION ", suspend freq %d %d MHz\n",
                suspend_freq[0] / 1000, suspend_freq[1] / 1000);
        return 0;
}

static int rockchip_bl_cpufreq_init(struct cpufreq_policy *policy)
{
        static int cpu0_err;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (policy->cpu == 0)
                cpu0_err = rockchip_bl_cpufreq_init_cpu0(policy);
        if (cpu0_err)
                return cpu0_err;

        /* set cpuinfo min/max frequency from the table */
        cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        /* sysfs node */
        cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);

        if (cur_cluster < MAX_CLUSTERS) {
                cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
                cpumask_copy(cluster_policy_mask[cur_cluster],
                             topology_core_cpumask(policy->cpu));
        }

        policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;

        /* makes ondemand's default sampling_rate 40000 us */
        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;

        return 0;
}

static int rockchip_bl_cpufreq_exit(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (policy->cpu == 0) {
                cpufreq_unregister_notifier(&notifier_policy_block,
                                            CPUFREQ_POLICY_NOTIFIER);
        }
        cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        clk_put_dvfs_node(clk_cpu_dvfs_node[cur_cluster]);

        return 0;
}

static struct freq_attr *rockchip_bl_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

#ifdef CONFIG_CHARGER_DISPLAY
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void)
{
        return 100;
}
#endif

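/*
 * For dynamic governors, clamp the target frequency during the first
 * 60 s after boot: with a nearly empty battery, stay at or below
 * low_battery_freq so boot does not brown out.
 */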
static unsigned int
rockchip_bl_cpufreq_scale_limit(unsigned int target_freq,
                                struct cpufreq_policy *policy, bool is_private)
{
        bool is_ondemand = cpufreq_is_ondemand(policy);
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!is_ondemand)
                return target_freq;

        if (is_booting) {
                s64 boottime_ms = ktime_to_ms(ktime_get_boottime());

                if (boottime_ms > 60 * MSEC_PER_SEC) {
                        is_booting = false;
                } else if (target_freq > low_battery_freq[cur_cluster] &&
                           rk_get_system_battery_capacity() <=
                           low_battery_capacity) {
                        target_freq = low_battery_freq[cur_cluster];
                }
        }

        return target_freq;
}

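/*
 * ->target() callback. The relation argument carries extra flags:
 * DISABLE_FURTHER_CPUFREQ takes a reference that blocks subsequent
 * frequency changes until a matching ENABLE_FURTHER_CPUFREQ releases
 * it (used across suspend and reboot), and CPUFREQ_PRIVATE marks
 * requests issued by this driver itself.
 */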
static int rockchip_bl_cpufreq_target(struct cpufreq_policy *policy,
                                      unsigned int target_freq,
                                      unsigned int relation)
{
        unsigned int i, new_freq = target_freq, new_rate, cur_rate;
        int ret = 0;
        bool is_private;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!freq_table[cur_cluster]) {
                FREQ_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

        if ((relation & ENABLE_FURTHER_CPUFREQ) &&
            no_cpufreq_access[cur_cluster])
                no_cpufreq_access[cur_cluster]--;
        if (no_cpufreq_access[cur_cluster]) {
                FREQ_LOG("denied access to %s as it is disabled temporarily\n",
                         __func__);
                ret = -EINVAL;
                goto out;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access[cur_cluster]++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
                                             target_freq, relation, &i);
        if (ret) {
                FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_freq = freq_table[cur_cluster][i].frequency;
        if (!no_cpufreq_access[cur_cluster])
                new_freq =
                    rockchip_bl_cpufreq_scale_limit(new_freq, policy,
                                                    is_private);

        new_rate = new_freq * 1000;
        cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[cur_cluster]);
        FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq,
                 new_freq, cur_rate / 1000);
        if (new_rate == cur_rate)
                goto out;
        ret = dvfs_clk_set_rate(clk_cpu_dvfs_node[cur_cluster], new_rate);

out:
        FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
        mutex_unlock(&cpufreq_mutex);
        return ret;
}

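/*
 * PM notifier: on suspend, pin each cluster to its suspend frequency
 * and set DISABLE_FURTHER_CPUFREQ; on resume, release the block with
 * ENABLE_FURTHER_CPUFREQ. policy->cur is nudged beforehand so that
 * cpufreq_driver_target() cannot return early on a matching frequency.
 */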
static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        int i;
        struct cpufreq_policy *policy;
        u32 cpu;

        for (i = 0; i < MAX_CLUSTERS; i++) {
                cpu = cpumask_first_and(cluster_policy_mask[i],
                        cpu_online_mask);
                if (cpu >= nr_cpu_ids)
                        continue;
                policy = cpufreq_cpu_get(cpu);
                if (!policy)
                        continue;

                if (!cpufreq_is_ondemand(policy))
                        goto out;

                switch (event) {
                case PM_SUSPEND_PREPARE:
                        policy->cur++;
                        ret = cpufreq_driver_target(policy, suspend_freq[i],
                                                    DISABLE_FURTHER_CPUFREQ |
                                                    CPUFREQ_RELATION_H);
                        if (ret < 0) {
                                ret = NOTIFY_BAD;
                                goto out;
                        }
                        ret = NOTIFY_OK;
                        break;
                case PM_POST_RESTORE:
                case PM_POST_SUSPEND:
                        /*
                         * If target_freq == policy->cur,
                         * cpufreq_driver_target() returns early and our
                         * ->target() is never called, so the
                         * ENABLE_FURTHER_CPUFREQ flag would go unhandled.
                         * Bump policy->cur to force the call.
                         */
                        policy->cur++;
                        cpufreq_driver_target(policy, suspend_freq[i],
                                              ENABLE_FURTHER_CPUFREQ |
                                              CPUFREQ_RELATION_H);
                        ret = NOTIFY_OK;
                        break;
                }
out:
                cpufreq_cpu_put(policy);
        }

        return ret;
}

static struct notifier_block rockchip_bl_cpufreq_pm_notifier = {
        .notifier_call = rockchip_bl_cpufreq_pm_notifier_event,
};

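/*
 * Pin both clusters to their suspend frequency (and thus a safe
 * voltage) before reboot, with the temperature limiter disabled.
 */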
static int rockchip_bl_cpufreq_reboot_limit_freq(void)
{
        struct regulator *regulator;
        int volt = 0;
        u32 rate;
        int i;

        dvfs_disable_temp_limit();

        for (i = 0; i < MAX_CLUSTERS; i++) {
                dvfs_clk_enable_limit(clk_cpu_dvfs_node[i],
                                      1000 * suspend_freq[i],
                                      1000 * suspend_freq[i]);
                rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[i]);
        }

        regulator = dvfs_get_regulator("vdd_arm");
        if (regulator)
                volt = regulator_get_voltage(regulator);
        else
                pr_info("get arm regulator failed\n");
        pr_info("reboot set cluster0 rate=%lu, cluster1 rate=%lu, volt=%d\n",
                dvfs_clk_get_rate(clk_cpu_dvfs_node[0]),
                dvfs_clk_get_rate(clk_cpu_dvfs_node[1]), volt);

        return 0;
}

static int rockchip_bl_cpufreq_reboot_notifier_event(struct notifier_block
                                                     *this, unsigned long event,
                                                     void *ptr)
{
        rockchip_set_system_status(SYS_STATUS_REBOOT);
        rockchip_bl_cpufreq_reboot_limit_freq();

        return NOTIFY_OK;
}

static struct notifier_block rockchip_bl_cpufreq_reboot_notifier = {
        .notifier_call = rockchip_bl_cpufreq_reboot_notifier_event,
};

static struct cpufreq_driver rockchip_bl_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = rockchip_bl_cpufreq_verify,
        .target = rockchip_bl_cpufreq_target,
        .get = rockchip_bl_cpufreq_get_rate,
        .init = rockchip_bl_cpufreq_init,
        .exit = rockchip_bl_cpufreq_exit,
        .name = "rockchip-bl",
        .have_governor_per_policy = true,
        .attr = rockchip_bl_cpufreq_attr,
};

static const struct of_device_id rockchip_bl_cpufreq_match[] = {
        {
                .compatible = "rockchip,rk3368-cpufreq",
        },
        {},
};
MODULE_DEVICE_TABLE(of, rockchip_bl_cpufreq_match);

static int __init rockchip_bl_cpufreq_probe(struct platform_device *pdev)
{
        int ret, i;

        for (i = 0; i < MAX_CLUSTERS; i++) {
                if (!alloc_cpumask_var(&cluster_policy_mask[i], GFP_KERNEL))
                        return -ENOMEM;
        }

        register_reboot_notifier(&rockchip_bl_cpufreq_reboot_notifier);
        register_pm_notifier(&rockchip_bl_cpufreq_pm_notifier);

        ret = cpufreq_register_driver(&rockchip_bl_cpufreq_driver);

#ifdef CONFIG_ROCKCHIP_CPUQUIET
        ret = cpuquiet_register_governor(&rockchip_bl_balanced_governor);
#endif

        return ret;
}

static int rockchip_bl_cpufreq_remove(struct platform_device *pdev)
{
        int i;

        for (i = 0; i < MAX_CLUSTERS; i++)
                free_cpumask_var(cluster_policy_mask[i]);
        cpufreq_unregister_driver(&rockchip_bl_cpufreq_driver);
        return 0;
}

static struct platform_driver rockchip_bl_cpufreq_platdrv = {
        .driver = {
                .name   = "rockchip-bl-cpufreq",
                .owner  = THIS_MODULE,
                .of_match_table = rockchip_bl_cpufreq_match,
        },
        .remove         = rockchip_bl_cpufreq_remove,
};

module_platform_driver_probe(rockchip_bl_cpufreq_platdrv,
                             rockchip_bl_cpufreq_probe);

MODULE_AUTHOR("Xiao Feng <xf@rock-chips.com>");
MODULE_LICENSE("GPL");

#ifdef CONFIG_ROCKCHIP_CPUQUIET
extern struct cpumask hmp_slow_cpu_mask;

enum cpu_speed_balance {
        CPU_SPEED_BALANCED,
        CPU_SPEED_BIASED,
        CPU_SPEED_SKEWED,
        CPU_SPEED_BOOST,
};

enum balanced_state {
        IDLE,
        DOWN,
        UP,
};

struct idle_info {
        u64 idle_last_us;
        u64 idle_current_us;
};

static u64 idleinfo_timestamp_us;
static u64 idleinfo_last_timestamp_us;
static DEFINE_PER_CPU(struct idle_info, idleinfo);
static DEFINE_PER_CPU(unsigned int, cpu_load);

static struct timer_list load_timer;
static bool load_timer_active;

/* configurable parameters */
static unsigned int  balance_level = 60;
static unsigned int  idle_bottom_freq[MAX_CLUSTERS];
static unsigned int  idle_top_freq[MAX_CLUSTERS];
static unsigned int  cpu_freq[MAX_CLUSTERS];
static unsigned long up_delay_jiffies;
static unsigned long down_delay_jiffies;
static unsigned long last_change_time_jiffies;
static unsigned int  load_sample_rate_jiffies = 20 / (MSEC_PER_SEC / HZ);
static unsigned int  little_high_load = 80;
static unsigned int  little_low_load = 20;
static unsigned int  big_low_load = 20;
static struct workqueue_struct *rockchip_bl_balanced_wq;
static struct delayed_work rockchip_bl_balanced_work;
static enum balanced_state rockchip_bl_balanced_state;
static struct kobject *rockchip_bl_balanced_kobj;
static DEFINE_MUTEX(rockchip_bl_balanced_lock);
static bool rockchip_bl_balanced_enable;

#define GOVERNOR_NAME "bl_balanced"

static u64 get_idle_us(int cpu)
{
        return get_cpu_idle_time(cpu, NULL, 1 /* io_busy */);
}

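/*
 * Periodic timer callback: sample each present CPU's cumulative idle
 * time and turn the delta over the sample window into a 0-100 load
 * value in the per-cpu cpu_load variable, then re-arm the timer.
 */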
static void calculate_load_timer(unsigned long data)
{
        int i;
        u64 elapsed_time;

        if (!load_timer_active)
                return;

        idleinfo_last_timestamp_us = idleinfo_timestamp_us;
        idleinfo_timestamp_us = ktime_to_us(ktime_get());
        elapsed_time = idleinfo_timestamp_us - idleinfo_last_timestamp_us;

        for_each_present_cpu(i) {
                struct idle_info *iinfo = &per_cpu(idleinfo, i);
                unsigned int *load = &per_cpu(cpu_load, i);
                u64 idle_time;

                iinfo->idle_last_us = iinfo->idle_current_us;
                iinfo->idle_current_us = get_idle_us(i);

                idle_time = iinfo->idle_current_us - iinfo->idle_last_us;
                idle_time *= 100;
                do_div(idle_time, elapsed_time);
                if (idle_time > 100)
                        idle_time = 100;
                *load = 100 - idle_time;
        }
        mod_timer(&load_timer, jiffies + load_sample_rate_jiffies);
}

static void start_load_timer(void)
{
        int i;

        if (load_timer_active)
                return;

        idleinfo_timestamp_us = ktime_to_us(ktime_get());
        for_each_present_cpu(i) {
                struct idle_info *iinfo = &per_cpu(idleinfo, i);

                iinfo->idle_current_us = get_idle_us(i);
        }
        mod_timer(&load_timer, jiffies + load_sample_rate_jiffies);

        load_timer_active = true;
}

static void stop_load_timer(void)
{
        if (!load_timer_active)
                return;

        load_timer_active = false;
        del_timer(&load_timer);
}

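/*
 * Pick the least-loaded online CPU other than cpu0 as the candidate
 * for offlining; return nr_cpu_ids if only cpu0 is online.
 */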
static unsigned int get_slowest_cpu(void)
{
        unsigned int cpu = nr_cpu_ids;
        unsigned long minload = ULONG_MAX;
        int i;

        for_each_online_cpu(i) {
                unsigned int load = per_cpu(cpu_load, i);

                if ((i > 0) && (minload >= load)) {
                        cpu = i;
                        minload = load;
                }
        }

        return cpu;
}

static unsigned int get_offline_big_cpu(void)
{
        struct cpumask big, offline_big;

        cpumask_andnot(&big, cpu_present_mask, &hmp_slow_cpu_mask);
        cpumask_andnot(&offline_big, &big, cpu_online_mask);
        return cpumask_first(&offline_big);
}

static unsigned int cpu_highest_speed(void)
{
        unsigned int maxload = 0;
        int i;

        for_each_online_cpu(i) {
                unsigned int load = per_cpu(cpu_load, i);

                maxload = max(maxload, load);
        }

        return maxload;
}

static unsigned int count_slow_cpus(unsigned int limit)
{
        unsigned int cnt = 0;
        int i;

        for_each_online_cpu(i) {
                unsigned int load = per_cpu(cpu_load, i);

                if (load <= limit)
                        cnt++;
        }

        return cnt;
}

#define NR_FSHIFT       2

static unsigned int rt_profile[NR_CPUS] = {
/*      1,  2,  3,  4,  5,  6,  7,  8 - on-line cpus target */
        5,  9, 10, 11, 12, 13, 14,  UINT_MAX
};

static unsigned int nr_run_hysteresis = 2;      /* 0.5 thread */
static unsigned int nr_run_last;

struct runnables_avg_sample {
        u64 previous_integral;
        unsigned int avg;
        bool integral_sampled;
        u64 prev_timestamp;     /* ns */
};

static DEFINE_PER_CPU(struct runnables_avg_sample, avg_nr_sample);

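/*
 * Average number of runnable threads summed over the online CPUs, in
 * FSHIFT fixed point, derived from the per-cpu nr_running_integral()
 * delta over the elapsed time, with 64-bit wraparound handled
 * explicitly.
 */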
static unsigned int get_avg_nr_runnables(void)
{
        unsigned int i, sum = 0;
        struct runnables_avg_sample *sample;
        u64 integral, old_integral, delta_integral, delta_time, cur_time;

        cur_time = ktime_to_ns(ktime_get());

        for_each_online_cpu(i) {
                sample = &per_cpu(avg_nr_sample, i);
                integral = nr_running_integral(i);
                old_integral = sample->previous_integral;
                sample->previous_integral = integral;
                delta_time = cur_time - sample->prev_timestamp;
                sample->prev_timestamp = cur_time;

                if (!sample->integral_sampled) {
                        sample->integral_sampled = true;
                        /*
                         * First sample only initializes prev_integral;
                         * skip the average calculation.
                         */
                        continue;
                }

                if (integral < old_integral) {
                        /* Overflow */
                        delta_integral = (ULLONG_MAX - old_integral) + integral;
                } else {
                        delta_integral = integral - old_integral;
                }

                /* Calculate average for the previous sample window */
                do_div(delta_integral, delta_time);
                sample->avg = delta_integral;
                sum += sample->avg;
        }

        return sum;
}

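/*
 * Decide whether to wake an offline big CPU: only when the little
 * cluster already runs at idle_top_freq and some little CPU is
 * saturated by a single heavy thread that the scheduler could not
 * simply migrate to an idle little or big CPU.
 */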
static bool rockchip_bl_balanced_speed_boost(void)
{
        unsigned int cpu;
        struct cpumask online_little;
        unsigned int big_cpu;
        bool has_low_load_little_cpu = false;

        if (cpu_freq[L_CLUSTER] < idle_top_freq[L_CLUSTER])
                return false;

        cpumask_and(&online_little, cpu_online_mask, &hmp_slow_cpu_mask);

        for_each_cpu(cpu, &online_little) {
                if (per_cpu(cpu_load, cpu) < little_low_load) {
                        has_low_load_little_cpu = true;
                        break;
                }
        }

        for_each_cpu(cpu, &online_little) {
                unsigned int load;
                unsigned int avg;
                struct cpumask online_big;
                bool has_low_load_big_cpu;

                load = per_cpu(cpu_load, cpu);
                /* skip low load cpu */
                if (load < little_high_load)
                        continue;

                avg = per_cpu(avg_nr_sample, cpu).avg;
                /*
                 * Skip this cpu if another little cpu has low load and
                 * the load here comes from several runnable tasks: the
                 * scheduler can migrate tasks to the low load cpu
                 * instead of us waking a big core.
                 */
                if (has_low_load_little_cpu &&
                    (avg >> (FSHIFT - NR_FSHIFT)) >= 4)
                        continue;

                /*
                 * Found one cpu kept busy by a single heavy thread;
                 * stop if there is no big cpu left to bring online.
                 */
                if (get_offline_big_cpu() >= nr_cpu_ids)
                        break;

                cpumask_andnot(&online_big,
                               cpu_online_mask, &hmp_slow_cpu_mask);

                has_low_load_big_cpu = false;
                for_each_cpu(big_cpu, &online_big) {
                        unsigned int big_load;

                        big_load = per_cpu(cpu_load, big_cpu);
                        if (big_load < big_low_load) {
                                has_low_load_big_cpu = true;
                                break;
                        }
                }
                /* if a big cpu is already idle, never bring up a new one */
                if (has_low_load_big_cpu)
                        break;

                return true;
        }

        return false;
}

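/*
 * Classify the current load picture: BOOST (wake a big core), BALANCED
 * (room to bring one more CPU online), BIASED (hold steady) or SKEWED
 * (take a CPU offline), honoring the PM QoS min/max online CPU
 * requests.
 */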
static enum cpu_speed_balance rockchip_bl_balanced_speed_balance(void)
{
        unsigned long highest_speed = cpu_highest_speed();
        unsigned long balanced_speed = highest_speed * balance_level / 100;
        unsigned long skewed_speed = balanced_speed / 2;
        unsigned int nr_cpus = num_online_cpus();
        unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
        unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
        unsigned int avg_nr_run = get_avg_nr_runnables();
        unsigned int nr_run;

        if (max_cpus > nr_cpu_ids || max_cpus == 0)
                max_cpus = nr_cpu_ids;

        if (rockchip_bl_balanced_speed_boost())
                return CPU_SPEED_BOOST;

        /*
         * balanced: load on all CPUs is above balance_level percent
         *           (default 60%) of the highest load
         * biased:   load on at least one CPU is below that threshold
         * skewed:   load on at least 2 CPUs is below half the threshold
         */
        for (nr_run = 1; nr_run < ARRAY_SIZE(rt_profile); nr_run++) {
                unsigned int nr_threshold = rt_profile[nr_run - 1];

                if (nr_run_last <= nr_run)
                        nr_threshold += nr_run_hysteresis;
                if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
                        break;
        }
        nr_run_last = nr_run;

        if ((count_slow_cpus(skewed_speed) >= 2 ||
             nr_run < nr_cpus ||
             (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
              cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) ||
             nr_cpus > max_cpus) &&
            nr_cpus > min_cpus)
                return CPU_SPEED_SKEWED;

        if ((count_slow_cpus(balanced_speed) >= 1 ||
             nr_run <= nr_cpus ||
             (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
              cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) ||
             nr_cpus == max_cpus) &&
            nr_cpus >= min_cpus)
                return CPU_SPEED_BIASED;

        return CPU_SPEED_BALANCED;
}

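/*
 * Governor work function. In the DOWN state keep quiescing the slowest
 * CPU until none remains; in the UP state wake a big CPU (BOOST), wake
 * any offline CPU (BALANCED) or quiesce the slowest one (SKEWED).
 * Offlining is rate-limited by down_delay_jiffies.
 */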
static void rockchip_bl_balanced_work_func(struct work_struct *work)
{
        bool up = false;
        unsigned int cpu = nr_cpu_ids;
        unsigned long now = jiffies;
        struct workqueue_struct *wq = rockchip_bl_balanced_wq;
        struct delayed_work *dwork = to_delayed_work(work);
        enum cpu_speed_balance balance;

        mutex_lock(&rockchip_bl_balanced_lock);

        if (!rockchip_bl_balanced_enable)
                goto out;

        switch (rockchip_bl_balanced_state) {
        case IDLE:
                break;
        case DOWN:
                cpu = get_slowest_cpu();
                if (cpu < nr_cpu_ids) {
                        up = false;
                        queue_delayed_work(wq, dwork, up_delay_jiffies);
                } else {
                        stop_load_timer();
                }
                break;
        case UP:
                balance = rockchip_bl_balanced_speed_balance();
                switch (balance) {
                case CPU_SPEED_BOOST:
                        cpu = get_offline_big_cpu();
                        if (cpu < nr_cpu_ids)
                                up = true;
                        break;
                /* cpu speed is up and balanced - one more on-line */
                case CPU_SPEED_BALANCED:
                        cpu = cpumask_next_zero(0, cpu_online_mask);
                        if (cpu < nr_cpu_ids)
                                up = true;
                        break;
                /* cpu speed is up, but skewed - remove one core */
                case CPU_SPEED_SKEWED:
                        cpu = get_slowest_cpu();
                        if (cpu < nr_cpu_ids)
                                up = false;
                        break;
                /* cpu speed is up, but under-utilized - do nothing */
                case CPU_SPEED_BIASED:
                default:
                        break;
                }
                queue_delayed_work(wq, dwork, up_delay_jiffies);
                break;
        default:
                pr_err("%s: invalid cpuquiet governor state %d\n",
                       __func__, rockchip_bl_balanced_state);
        }

        if (!up && ((now - last_change_time_jiffies) < down_delay_jiffies))
                cpu = nr_cpu_ids;

        if (cpu < nr_cpu_ids) {
                last_change_time_jiffies = now;
                if (up)
                        cpuquiet_wake_cpu(cpu, false);
                else
                        cpuquiet_quiesence_cpu(cpu, false);
        }

out:
        mutex_unlock(&rockchip_bl_balanced_lock);
}

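/*
 * Called on every cpufreq transition (also from the dvfs set_rate
 * callback): record the cluster's new frequency and move the governor
 * between IDLE, UP and DOWN based on both clusters' idle_top_freq and
 * idle_bottom_freq thresholds.
 */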
static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
                                                    unsigned int new_cpu_freq)
{
        struct workqueue_struct *wq;
        struct delayed_work *dwork;

        mutex_lock(&rockchip_bl_balanced_lock);

        if (!rockchip_bl_balanced_enable)
                goto out;

        wq = rockchip_bl_balanced_wq;
        dwork = &rockchip_bl_balanced_work;
        cpu_freq[cluster] = new_cpu_freq;

        switch (rockchip_bl_balanced_state) {
        case IDLE:
                if (cpu_freq[B_CLUSTER] >= idle_top_freq[B_CLUSTER] ||
                    cpu_freq[L_CLUSTER] >= idle_top_freq[L_CLUSTER]) {
                        rockchip_bl_balanced_state = UP;
                        queue_delayed_work(wq, dwork, up_delay_jiffies);
                        start_load_timer();
                } else if (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
                           cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) {
                        rockchip_bl_balanced_state = DOWN;
                        queue_delayed_work(wq, dwork, down_delay_jiffies);
                        start_load_timer();
                }
                break;
        case DOWN:
                if (cpu_freq[B_CLUSTER] >= idle_top_freq[B_CLUSTER] ||
                    cpu_freq[L_CLUSTER] >= idle_top_freq[L_CLUSTER]) {
                        rockchip_bl_balanced_state = UP;
                        queue_delayed_work(wq, dwork, up_delay_jiffies);
                        start_load_timer();
                }
                break;
        case UP:
                if (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
                    cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) {
                        rockchip_bl_balanced_state = DOWN;
                        queue_delayed_work(wq, dwork, up_delay_jiffies);
                        start_load_timer();
                }
                break;
        default:
                pr_err("%s: invalid cpuquiet governor state %d\n",
                       __func__, rockchip_bl_balanced_state);
        }

out:
        mutex_unlock(&rockchip_bl_balanced_lock);
}

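/*
 * Store callback for the *_delay_jiffies attributes: the value is
 * written in milliseconds and converted to jiffies in place.
 */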
static void delay_callback(struct cpuquiet_attribute *attr)
{
        unsigned long val;

        if (attr) {
                val = (*((unsigned long *)(attr->param)));
                (*((unsigned long *)(attr->param))) = msecs_to_jiffies(val);
        }
}

#define CPQ_BASIC_ATTRIBUTE_B(_name, _mode, _type) \
        static struct cpuquiet_attribute _name ## _b_attr = {           \
                .attr = {.name = __stringify(_name ## _b), .mode = _mode },\
                .show = show_ ## _type ## _attribute,                   \
                .store = store_ ## _type ## _attribute,                 \
                .param = &_name[B_CLUSTER],                             \
}
#define CPQ_BASIC_ATTRIBUTE_L(_name, _mode, _type) \
        static struct cpuquiet_attribute _name ## _l_attr = {           \
                .attr = {.name = __stringify(_name ## _l), .mode = _mode },\
                .show = show_ ## _type ## _attribute,                   \
                .store = store_ ## _type ## _attribute,                 \
                .param = &_name[L_CLUSTER],                             \
}
CPQ_BASIC_ATTRIBUTE(balance_level, 0644, uint);
CPQ_BASIC_ATTRIBUTE_B(idle_bottom_freq, 0644, uint);
CPQ_BASIC_ATTRIBUTE_L(idle_bottom_freq, 0644, uint);
CPQ_BASIC_ATTRIBUTE_B(idle_top_freq, 0644, uint);
CPQ_BASIC_ATTRIBUTE_L(idle_top_freq, 0644, uint);
CPQ_BASIC_ATTRIBUTE(load_sample_rate_jiffies, 0644, uint);
CPQ_BASIC_ATTRIBUTE(nr_run_hysteresis, 0644, uint);
CPQ_BASIC_ATTRIBUTE(little_high_load, 0644, uint);
CPQ_BASIC_ATTRIBUTE(little_low_load, 0644, uint);
CPQ_BASIC_ATTRIBUTE(big_low_load, 0644, uint);
CPQ_ATTRIBUTE(up_delay_jiffies, 0644, ulong, delay_callback);
CPQ_ATTRIBUTE(down_delay_jiffies, 0644, ulong, delay_callback);

#define MAX_BYTES 100

static ssize_t show_rt_profile(struct cpuquiet_attribute *attr, char *buf)
{
        char buffer[MAX_BYTES];
        unsigned int i;
        int size = 0;

        buffer[0] = 0;
        for (i = 0; i < ARRAY_SIZE(rt_profile); i++) {
                size += snprintf(buffer + size, sizeof(buffer) - size,
                                "%u ", rt_profile[i]);
        }
        return snprintf(buf, sizeof(buffer), "%s\n", buffer);
}

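/*
 * Parse a space-separated list of thresholds into rt_profile. At most
 * ARRAY_SIZE(rt_profile) - 1 values are accepted, so the trailing
 * UINT_MAX sentinel is never overwritten.
 */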
static ssize_t store_rt_profile(struct cpuquiet_attribute *attr,
                                const char *buf, size_t count)
{
        int ret, i = 0;
        char *val, *str, input[MAX_BYTES];
        unsigned int profile[ARRAY_SIZE(rt_profile)];

        if (!count || count >= MAX_BYTES)
                return -EINVAL;
        strncpy(input, buf, count);
        input[count] = '\0';
        str = input;
        memcpy(profile, rt_profile, sizeof(rt_profile));
        while ((val = strsep(&str, " ")) != NULL) {
                if (*val == '\0')
                        continue;
                if (i == ARRAY_SIZE(rt_profile) - 1)
                        break;
                ret = kstrtouint(val, 10, &profile[i]);
                if (ret)
                        return -EINVAL;
                i++;
        }

        memcpy(rt_profile, profile, sizeof(profile));

        return count;
}
CPQ_ATTRIBUTE_CUSTOM(rt_profile, 0644,
                     show_rt_profile, store_rt_profile);

static struct attribute *rockchip_bl_balanced_attributes[] = {
        &balance_level_attr.attr,
        &idle_bottom_freq_b_attr.attr,
        &idle_bottom_freq_l_attr.attr,
        &idle_top_freq_b_attr.attr,
        &idle_top_freq_l_attr.attr,
        &up_delay_jiffies_attr.attr,
        &down_delay_jiffies_attr.attr,
        &load_sample_rate_jiffies_attr.attr,
        &nr_run_hysteresis_attr.attr,
        &rt_profile_attr.attr,
        &little_high_load_attr.attr,
        &little_low_load_attr.attr,
        &big_low_load_attr.attr,
        NULL,
};

static const struct sysfs_ops rockchip_bl_balanced_sysfs_ops = {
        .show = cpuquiet_auto_sysfs_show,
        .store = cpuquiet_auto_sysfs_store,
};

static struct kobj_type rockchip_bl_balanced_ktype = {
        .sysfs_ops = &rockchip_bl_balanced_sysfs_ops,
        .default_attrs = rockchip_bl_balanced_attributes,
};

static int rockchip_bl_balanced_sysfs(void)
{
        int err;
        struct kobject *kobj;

        kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);

        if (!kobj)
                return -ENOMEM;

        err = cpuquiet_kobject_init(kobj, &rockchip_bl_balanced_ktype,
                                    GOVERNOR_NAME);

        /* don't leave a dangling pointer behind on failure */
        if (err) {
                kfree(kobj);
                return err;
        }

        rockchip_bl_balanced_kobj = kobj;

        return 0;
}

static void rockchip_bl_balanced_stop(void)
{
        mutex_lock(&rockchip_bl_balanced_lock);

        rockchip_bl_balanced_enable = false;
        /* now we can force the governor to be idle */
        rockchip_bl_balanced_state = IDLE;

        mutex_unlock(&rockchip_bl_balanced_lock);

        cancel_delayed_work_sync(&rockchip_bl_balanced_work);

        destroy_workqueue(rockchip_bl_balanced_wq);
        rockchip_bl_balanced_wq = NULL;
        del_timer_sync(&load_timer);

        kobject_put(rockchip_bl_balanced_kobj);
        kfree(rockchip_bl_balanced_kobj);
        rockchip_bl_balanced_kobj = NULL;
}

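/*
 * Governor start: create the sysfs interface, derive idle_top_freq and
 * idle_bottom_freq from the middle of each cluster's frequency table,
 * and kick the state machine with the current frequency.
 */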
static int rockchip_bl_balanced_start(void)
{
        int err, count, cluster;
        struct cpufreq_frequency_table *table;
        unsigned int initial_freq;

        err = rockchip_bl_balanced_sysfs();
        if (err)
                return err;

        up_delay_jiffies = msecs_to_jiffies(100);
        down_delay_jiffies = msecs_to_jiffies(2000);

        for (cluster = 0; cluster < MAX_CLUSTERS; cluster++) {
                table = freq_table[cluster];
                if (!table)
                        return -EINVAL;

                for (count = 0; table[count].frequency != CPUFREQ_TABLE_END;
                     count++)
                        ;

                if (count < 4)
                        return -EINVAL;

                idle_top_freq[cluster] = table[(count / 2) - 1].frequency;
                idle_bottom_freq[cluster] = table[(count / 2) - 2].frequency;
        }

        rockchip_bl_balanced_wq
                = alloc_workqueue(GOVERNOR_NAME, WQ_UNBOUND | WQ_FREEZABLE, 1);
        if (!rockchip_bl_balanced_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&rockchip_bl_balanced_work,
                          rockchip_bl_balanced_work_func);

        init_timer(&load_timer);
        load_timer.function = calculate_load_timer;

        mutex_lock(&rockchip_bl_balanced_lock);
        rockchip_bl_balanced_enable = true;
        if (clk_cpu_dvfs_node[L_CLUSTER])
                cpu_freq[L_CLUSTER] =
                        clk_get_rate(clk_cpu_dvfs_node[L_CLUSTER]->clk) / 1000;
        if (clk_cpu_dvfs_node[B_CLUSTER])
                cpu_freq[B_CLUSTER] =
                        clk_get_rate(clk_cpu_dvfs_node[B_CLUSTER]->clk) / 1000;
        mutex_unlock(&rockchip_bl_balanced_lock);

        /* Kick start the state machine */
        initial_freq = cpufreq_get(0);
        if (initial_freq)
                rockchip_bl_balanced_cpufreq_transition(L_CLUSTER,
                                                        initial_freq);

        return 0;
}

static struct cpuquiet_governor rockchip_bl_balanced_governor = {
        .name           = GOVERNOR_NAME,
        .start          = rockchip_bl_balanced_start,
        .stop           = rockchip_bl_balanced_stop,
        .owner          = THIS_MODULE,
};
#endif