/*
 * Copyright (C) 2013 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)

/* Frequency table index must be sequential starting at 0 */
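/* In this driver .frequency is in kHz and .index is (re)used to carry the
 * matching operating voltage in uV, as in the suspend_volt lookup in
 * cpufreq_init_cpu0(). */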
static struct cpufreq_frequency_table default_freq_table[] = {
	{.frequency = 312 * 1000, .index = 875 * 1000},
	{.frequency = 504 * 1000, .index = 925 * 1000},
	{.frequency = 816 * 1000, .index = 975 * 1000},
	{.frequency = 1008 * 1000, .index = 1075 * 1000},
	{.frequency = 1200 * 1000, .index = 1150 * 1000},
	{.frequency = 1416 * 1000, .index = 1250 * 1000},
	{.frequency = 1608 * 1000, .index = 1350 * 1000},
	{.frequency = CPUFREQ_TABLE_END},

static struct cpufreq_frequency_table *freq_table = default_freq_table;
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ 0x10
#define ENABLE_FURTHER_CPUFREQ 0x20
#define MASK_FURTHER_CPUFREQ 0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE 0x100
static int no_cpufreq_access;
static unsigned int suspend_freq = 816 * 1000;
static unsigned int suspend_volt = 1000000; // 1V
static unsigned int low_battery_freq = 600 * 1000;
static unsigned int low_battery_capacity = 5; // 5%
static bool is_booting = true;
static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
struct dvfs_node *clk_cpu_dvfs_node = NULL;
struct dvfs_node *clk_gpu_dvfs_node = NULL;
struct dvfs_node *clk_vepu_dvfs_node = NULL;
/*******************************************************/
static unsigned int cpufreq_get_rate(unsigned int cpu)
	if (clk_cpu_dvfs_node)
		return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
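
/* Treat a governor as "ondemand-like" by the first letter of its name
 * ('o', 'i', 'c', 'h'), i.e. ondemand, interactive, conservative and
 * (presumably) hotplug. */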
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
	if (policy && policy->governor)
		c = policy->governor->name[0];
	return (c == 'o' || c == 'i' || c == 'c' || c == 'h');

static unsigned int get_freq_from_table(unsigned int max_freq)
	unsigned int target_freq = 0;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = freq_table[i].frequency;
		if (freq <= max_freq && target_freq < freq) {
		target_freq = max_freq;
/**********************thermal limit**************************/
//#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static unsigned int temp_limit_freq = -1;
module_param(temp_limit_freq, uint, 0444);
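
/* One row per number of online CPUs (row = nr_cpus - 1). Each entry maps a
 * temperature threshold in degrees C (.index) to the maximum allowed CPU
 * frequency in kHz (.frequency); -1 effectively means "no limit" at that
 * threshold. */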
static struct cpufreq_frequency_table temp_limits[4][4] = {
	{.frequency = -1, .index = 50},
	{.frequency = -1, .index = 55},
	{.frequency = -1, .index = 60},
	{.frequency = 1608 * 1000, .index = 75},
	{.frequency = 1800 * 1000, .index = 50},
	{.frequency = 1608 * 1000, .index = 55},
	{.frequency = 1416 * 1000, .index = 60},
	{.frequency = 1200 * 1000, .index = 75},
	{.frequency = 1608 * 1000, .index = 50},
	{.frequency = 1416 * 1000, .index = 55},
	{.frequency = 1200 * 1000, .index = 60},
	{.frequency = 1008 * 1000, .index = 75},
	{.frequency = 1416 * 1000, .index = 50},
	{.frequency = 1200 * 1000, .index = 55},
	{.frequency = 1008 * 1000, .index = 60},
	{.frequency = 816 * 1000, .index = 75},

static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
	{.frequency = 1008 * 1000, .index = 100},

static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
	{.frequency = 1008 * 1000, .index = 0},

static int get_temp(void)
static char sys_state;
static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
	if (copy_from_user(&state, buffer, 1)) {

static const struct file_operations sys_state_fops = {
	.owner = THIS_MODULE,
	.write = sys_state_write,

static struct miscdevice sys_state_dev = {
	.fops = &sys_state_fops,
	.minor = MISC_DYNAMIC_MINOR,
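
/* Polls the temperature (get_temp()) roughly every 100 ms. Depending on the
 * reported system state, GPU activity (Mali-400 interrupt counts) and how
 * busy the online CPUs have been, it picks one of the limit tables above and
 * updates temp_limit_freq; if the current rate exceeds the new cap, a
 * retarget is forced on CPU0's policy. */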
static void cpufreq_temp_limit_work_func(struct work_struct *work)
	static bool in_perf = false;
	struct cpufreq_policy *policy;
	unsigned int new_freq = -1;
	unsigned long delay = HZ / 10; // 100ms
	unsigned int nr_cpus = num_online_cpus();
	const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
	size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);

	if (sys_state == '1') {
		if (gpu_is_mali400) {
			unsigned int gpu_irqs[2];
			gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
			gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
			if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
				limits_table = temp_limits_cpu_perf;
				limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
				limits_table = temp_limits_gpu_perf;
				limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
			limits_table = temp_limits_cpu_perf;
			limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
	} else if (in_perf) {
		static u64 last_time_in_idle = 0;
		static u64 last_time_in_idle_timestamp = 0;
		u64 time_in_idle = 0, now;

		for_each_online_cpu(cpu) {
			time_in_idle += get_cpu_idle_time_us(cpu, &now);
		delta_time = now - last_time_in_idle_timestamp;
		delta_idle = time_in_idle - last_time_in_idle;
		last_time_in_idle = time_in_idle;
		last_time_in_idle_timestamp = now;
		delta_idle += delta_time >> 4; // +6.25%
		if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
			limits_table = temp_limits[0];
		else if (delta_idle > (nr_cpus - 2) * delta_time)
			limits_table = temp_limits[1];
		else if (delta_idle > (nr_cpus - 3) * delta_time)
			limits_table = temp_limits[2];
		FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0]));

	for (i = 0; i < limits_size; i++) {
		if (temp >= limits_table[i].index) {
			new_freq = limits_table[i].frequency;

	if (temp_limit_freq != new_freq) {
		unsigned int cur_freq;
		temp_limit_freq = new_freq;
		cur_freq = cpufreq_get_rate(0);
		FREQ_DBG("temp limit %7d KHz cur %7d KHz\n", temp_limit_freq, cur_freq);
		if (cur_freq > temp_limit_freq) {
			policy = cpufreq_cpu_get(0);
			cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
			cpufreq_cpu_put(policy);

	queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);

static DECLARE_DELAYED_WORK(cpufreq_temp_limit_work, cpufreq_temp_limit_work_func);
static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
	struct cpufreq_policy *policy = data;

	if (val != CPUFREQ_NOTIFY)

	if (cpufreq_is_ondemand(policy)) {
		FREQ_DBG("queue work\n");
		queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
		FREQ_DBG("cancel work\n");
		cancel_delayed_work_sync(&cpufreq_temp_limit_work);

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy
static void cpufreq_temp_limit_init(struct cpufreq_policy *policy)
	struct cpufreq_frequency_table *table;

	table = temp_limits[0];
	for (i = 0; i < sizeof(temp_limits) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	table = temp_limits_cpu_perf;
	for (i = 0; i < sizeof(temp_limits_cpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	table = temp_limits_gpu_perf;
	for (i = 0; i < sizeof(temp_limits_gpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	misc_register(&sys_state_dev);
	if (cpufreq_is_ondemand(policy)) {
		queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0*HZ);
	cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);

static void cpufreq_temp_limit_exit(void)
	cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);

	cancel_delayed_work(&cpufreq_temp_limit_work);

static inline void cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
static inline void cpufreq_temp_limit_exit(void) {}
static int cpufreq_verify(struct cpufreq_policy *policy)
	return cpufreq_frequency_table_verify(policy, freq_table);
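
/* Registered as the dvfs set_rate callback for the core clock: wraps the
 * actual clk_set_rate() in CPUFREQ_PRECHANGE/POSTCHANGE notifications and
 * refreshes each CPU's loops_per_jiffy after the transition. */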
static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
	struct cpufreq_freqs freqs;
	struct cpufreq_policy *policy;

	freqs.new = rate / 1000;
	freqs.old = clk_get_rate(clk) / 1000;

	for_each_online_cpu(freqs.cpu) {
		policy = cpufreq_cpu_get(freqs.cpu);
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		cpufreq_cpu_put(policy);

	FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

	ret = clk_set_rate(clk, rate);

	/*
	 * Note that loops_per_jiffy is not updated on SMP systems in
	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
	 * on frequency transition. We need to update all dependent CPUs.
	 */
	for_each_possible_cpu(i) {
		per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;

	freqs.new = clk_get_rate(clk) / 1000;

	for_each_online_cpu(freqs.cpu) {
		policy = cpufreq_cpu_get(freqs.cpu);
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
		cpufreq_cpu_put(policy);
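
/* CPU0-only init: enables the gpu/vepu dvfs nodes, hooks the core clock's
 * set_rate callback, fetches the freq/volt table (falling back to the default
 * table) and derives suspend_freq as the lowest-voltage entry whose voltage is
 * at or above suspend_volt. */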
static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
	gpu_is_mali400 = cpu_is_rk3188();

	clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
	if (clk_gpu_dvfs_node) {
		clk_enable_dvfs(clk_gpu_dvfs_node);
		dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);

	clk_vepu_dvfs_node = clk_get_dvfs_node("clk_vepu");
	if (clk_vepu_dvfs_node) {
		clk_enable_dvfs(clk_vepu_dvfs_node);
		dvfs_clk_set_rate(clk_vepu_dvfs_node, 198000000);

	clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
	if (!clk_cpu_dvfs_node) {

	dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
	freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
	if (freq_table == NULL) {
		freq_table = default_freq_table;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
			suspend_freq = freq_table[i].frequency;
			v = freq_table[i].index;

	low_battery_freq = get_freq_from_table(low_battery_freq);
	clk_enable_dvfs(clk_cpu_dvfs_node);

	freq_wq = alloc_workqueue("cpufreq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
	cpufreq_temp_limit_init(policy);

	printk("cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
static int cpufreq_init(struct cpufreq_policy *policy)
	if (policy->cpu == 0) {
		cpu0_err = cpufreq_init_cpu0(policy);

	cpufreq_frequency_table_cpuinfo(policy, freq_table);

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

	policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; // make ondemand default sampling_rate to 40000

	/*
	 * On SMP configuration, both processors share the voltage
	 * and clock. So both CPUs need to be scaled together and hence
	 * need software co-ordination. Use cpufreq affected_cpus
	 * interface to handle this scenario. Additional is_smp() check
	 * is to keep SMP_ON_UP build working.
	 */
	cpumask_setall(policy->cpus);
static int cpufreq_exit(struct cpufreq_policy *policy)
	if (policy->cpu != 0)

	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	clk_put_dvfs_node(clk_cpu_dvfs_node);
	cpufreq_temp_limit_exit();

	flush_workqueue(freq_wq);
	destroy_workqueue(freq_wq);

static struct freq_attr *cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
//#ifdef CONFIG_POWER_SUPPLY
extern int rk_get_system_battery_capacity(void);
static int rk_get_system_battery_capacity(void) { return 100; }
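
/* Clamps the requested frequency: during roughly the first minute of boot the
 * rate is capped to low_battery_freq while the battery is at or below
 * low_battery_capacity, and, when CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP is set,
 * the request is limited to temp_limit_freq for ondemand-like governors. */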
static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
	bool is_ondemand = cpufreq_is_ondemand(policy);

	s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
	if (boottime_ms > 60 * MSEC_PER_SEC) {
	} else if (target_freq > low_battery_freq &&
		   rk_get_system_battery_capacity() <= low_battery_capacity) {
		target_freq = low_battery_freq;

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
	static unsigned int ondemand_target = 816 * 1000;
		target_freq = ondemand_target;
		ondemand_target = target_freq;

	/*
	 * If the new frequency is more than the thermal max allowed
	 * frequency, go ahead and scale the mpu device to proper frequency.
	 */
	target_freq = min(target_freq, temp_limit_freq);
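
/* The driver's .target hook: honours the DISABLE/ENABLE_FURTHER_CPUFREQ bits
 * encoded into "relation" (used around suspend/reboot), applies the scale
 * limits above, then hands the new rate to the dvfs layer. */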
static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
	unsigned int i, new_freq = target_freq, new_rate, cur_rate;

		FREQ_ERR("no freq table!\n");

	mutex_lock(&cpufreq_mutex);

	is_private = relation & CPUFREQ_PRIVATE;
	relation &= ~CPUFREQ_PRIVATE;

	if (relation & ENABLE_FURTHER_CPUFREQ)

	if (no_cpufreq_access) {
		FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);

	if (relation & DISABLE_FURTHER_CPUFREQ)

	relation &= ~MASK_FURTHER_CPUFREQ;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
		FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);

	new_freq = freq_table[i].frequency;
	if (!no_cpufreq_access)
		new_freq = cpufreq_scale_limit(new_freq, policy, is_private);

	new_rate = new_freq * 1000;
	cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
	FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
	if (new_rate == cur_rate)

	ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);

	FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
	mutex_unlock(&cpufreq_mutex);
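
/* On suspend, pin the CPU to suspend_freq and block further cpufreq requests
 * (DISABLE_FURTHER_CPUFREQ); on resume/restore the block is lifted again.
 * Only applies to ondemand-like governors. */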
static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
	int ret = NOTIFY_DONE;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	if (!cpufreq_is_ondemand(policy))

	case PM_SUSPEND_PREPARE:
		ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);

	cpufreq_cpu_put(policy);

static struct notifier_block cpufreq_pm_notifier = {
	.notifier_call = cpufreq_pm_notifier_event,
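
/* Before reboot, also drop to suspend_freq and disable further changes so the
 * rate stays fixed for the rest of the shutdown path. */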
static int cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
	cpufreq_cpu_put(policy);

static struct notifier_block cpufreq_reboot_notifier = {
	.notifier_call = cpufreq_reboot_notifier_event,

static struct cpufreq_driver cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cpufreq_verify,
	.target = cpufreq_target,
	.get = cpufreq_get_rate,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.attr = cpufreq_attr,

static int __init cpufreq_driver_init(void)
	register_pm_notifier(&cpufreq_pm_notifier);
	register_reboot_notifier(&cpufreq_reboot_notifier);
	return cpufreq_register_driver(&cpufreq_driver);

device_initcall(cpufreq_driver_init);