ARM64: DTS: Add rk3399-firefly uart4 device, node as /dev/ttyS1
[firefly-linux-kernel-4.4.55.git] / cpufreq.c
1 /*
2  * Copyright (C) 2012-2013 ROCKCHIP, Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14 //#define DEBUG 1
15 #define pr_fmt(fmt) "cpufreq: " fmt
16 #include <linux/clk.h>
17 #include <linux/cpufreq.h>
18 #include <linux/err.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/init.h>
21 #include <linux/reboot.h>
22 #include <linux/suspend.h>
23 #include <linux/tick.h>
24 #include <linux/workqueue.h>
25 #include <asm/smp_plat.h>
26 #include <asm/cpu.h>
27 #include <mach/dvfs.h>
28 #include <linux/delay.h>
29 #include <linux/regulator/consumer.h>
30 #include <linux/fs.h>
31 #include <linux/string.h>
32 #include <linux/earlysuspend.h>
33 #include <asm/unistd.h>
34 #include <asm/uaccess.h>
35 #include <mach/ddr.h>
36 #include <linux/cpu.h>
/* Driver logging helpers: DBG/LOG compile away entirely unless DEBUG is
 * defined near the top of this file; ERR always prints. */
#ifdef DEBUG
#define FREQ_PRINTK_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_PRINTK_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_PRINTK_DBG(fmt, args...) do {} while(0)
#define FREQ_PRINTK_LOG(fmt, args...) do {} while(0)
#endif
#define FREQ_PRINTK_ERR(fmt, args...) pr_err(fmt, ## args)
45
/* Frequency table index must be sequential starting at 0 */
/* Fallback OPP table used when dvfs_get_freq_volt_table() returns none.
 * NOTE(review): .index appears to carry the voltage in uV (1100 * 1000)
 * rather than a table index here - confirm against the DVFS layer. */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 816 * 1000, .index = 1100 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};

/* Active table; replaced by the DVFS-provided one in rk30_cpu_init(). */
static struct cpufreq_frequency_table *freq_table = default_freq_table;
/* Highest frequency (kHz) found in freq_table; recomputed at init. */
static unsigned int max_freq = -1;
54
/*********************************************************/

/* additional symantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
/* Marks a request originating from this driver itself (thermal worker),
 * as opposed to a governor-issued target. */
#define CPUFREQ_PRIVATE                 0x100
/* Non-zero while suspend/reboot paths have locked out frequency changes. */
static int no_cpufreq_access;
/* Frequency (kHz) pinned while suspended or rebooting. */
static unsigned int suspend_freq = 816 * 1000;

static struct workqueue_struct *freq_wq;
static struct clk *cpu_clk;
static struct clk *cpu_pll;
static struct clk *cpu_gpll;


/* Serializes rk30_target() and the "further cpufreq" state. */
static DEFINE_MUTEX(cpufreq_mutex);

static struct clk *gpu_clk;
static struct clk *ddr_clk;
#if !defined(CONFIG_ARCH_RK3066B) && !defined(CONFIG_ARCH_RK3188)
#define GPU_MAX_RATE 350*1000*1000
#endif

static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate, dvfs_set_rate_callback set_rate);
81
82 /*******************************************************/
83 static unsigned int rk30_getspeed(unsigned int cpu)
84 {
85         unsigned long rate;
86
87         if (cpu >= NR_CPUS)
88                 return 0;
89
90         rate = clk_get_rate(cpu_clk) / 1000;
91         return rate;
92 }
93
94 static bool rk30_cpufreq_is_ondemand_policy(struct cpufreq_policy *policy)
95 {
96         char c = 0;
97         if (policy && policy->governor)
98                 c = policy->governor->name[0];
99         return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
100 }
101
/**********************thermal limit**************************/
#if !defined(CONFIG_ARCH_RK3066B) && !defined(CONFIG_ARCH_RK3188)
#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
#endif

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work);

static DECLARE_DELAYED_WORK(rk30_cpufreq_temp_limit_work, rk30_cpufreq_temp_limit_work_func);

/* Current thermal cap in kHz; (unsigned)-1 means "no cap active".
 * Exposed read-only as a module parameter for debugging. */
static unsigned int temp_limt_freq = -1;
module_param(temp_limt_freq, uint, 0444);

#define TEMP_LIMIT_FREQ 816000

/* Throttle table: .index holds the temperature threshold (degrees C),
 * .frequency the cap (kHz) applied above that temperature. */
static const struct cpufreq_frequency_table temp_limits[] = {
        {.frequency = 1416 * 1000, .index = 50},
        {.frequency = 1200 * 1000, .index = 55},
        {.frequency = 1008 * 1000, .index = 60},
        {.frequency =  816 * 1000, .index = 75},
};

/* Stricter table used while the GPU runs above GPU_MAX_RATE with few
 * interrupts (see the work function). */
static const struct cpufreq_frequency_table temp_limits_high[] = {
        {.frequency =  816 * 1000, .index = 100},
};

extern int rk30_tsadc_get_temp(unsigned int chn);
129
130 static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work)
131 {
132         struct cpufreq_policy *policy;
133         int temp, i;
134         unsigned int new = -1;
135         unsigned long delay = HZ;
136         const struct cpufreq_frequency_table *limits_table = temp_limits;
137         size_t limits_size = ARRAY_SIZE(temp_limits);
138         unsigned int gpu_irqs[2];
139         gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
140
141         temp = rk30_tsadc_get_temp(0);
142         FREQ_PRINTK_LOG("cpu_thermal(%d)\n", temp);
143
144         gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
145         if (clk_get_rate(gpu_clk) > GPU_MAX_RATE) {
146                 delay = HZ / 20;
147                 if ((gpu_irqs[1] - gpu_irqs[0]) < 3) {
148                         limits_table = temp_limits_high;
149                         limits_size = ARRAY_SIZE(temp_limits_high);
150                 }
151         }
152         for (i = 0; i < limits_size; i++) {
153                 if (temp > limits_table[i].index) {
154                         new = limits_table[i].frequency;
155                 }
156         }
157         if (temp_limt_freq != new) {
158                 temp_limt_freq = new;
159                 FREQ_PRINTK_DBG("temp_limit set rate %d kHz\n", temp_limt_freq);
160                 policy = cpufreq_cpu_get(0);
161                 cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
162                 cpufreq_cpu_put(policy);
163         }
164
165         queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, delay);
166 }
167
168 static int rk30_cpufreq_notifier_policy(struct notifier_block *nb,
169                                         unsigned long val, void *data)
170 {
171         struct cpufreq_policy *policy = data;
172
173         if (val != CPUFREQ_NOTIFY)
174                 return 0;
175
176         if (rk30_cpufreq_is_ondemand_policy(policy)) {
177                 FREQ_PRINTK_DBG("queue work\n");
178                 queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, 0);
179         } else {
180                 FREQ_PRINTK_DBG("cancel work\n");
181                 cancel_delayed_work_sync(&rk30_cpufreq_temp_limit_work);
182         }
183
184         return 0;
185 }
186
187 static struct notifier_block notifier_policy_block = {
188         .notifier_call = rk30_cpufreq_notifier_policy
189 };
190 #endif
191
/************************************dvfs tst************************************/
//#define CPU_FREQ_DVFS_TST
#ifdef CPU_FREQ_DVFS_TST
/* Self-test (normally compiled out): once a second, force the CPU through
 * a fixed cycle of rates to exercise the DVFS paths. */
static unsigned int freq_dvfs_tst_rate;
static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(rk30_cpufreq_dvsf_tst_work, rk30_cpufreq_dvsf_tst_work_func);
static int test_count;
#define TEST_FRE_NUM 11
/* CPU test rates, in MHz, applied round-robin. */
static int test_tlb_rate[TEST_FRE_NUM] = { 504, 1008, 504, 1200, 252, 816, 1416, 252, 1512, 252, 816 };
//static int test_tlb_rate[TEST_FRE_NUM]={504,1008,504,1200,252,816,1416,126,1512,126,816};

#define TEST_GPU_NUM 3

/* GPU/DDR test rates (MHz) - only used by the #if 0 section below. */
static int test_tlb_gpu[TEST_GPU_NUM] = { 360, 400, 180 };
static int test_tlb_ddr[TEST_GPU_NUM] = { 401, 200, 500 };

static int gpu_ddr = 0;

/* Work item: advance to the next test rate, then ask cpufreq to retarget;
 * cpufreq_scale_limt() substitutes freq_dvfs_tst_rate for the request. */
static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        gpu_ddr++;

#if 0
        FREQ_PRINTK_LOG("cpufreq_dvsf_tst,ddr%u,gpu%u\n",
                        test_tlb_ddr[gpu_ddr % TEST_GPU_NUM],
                        test_tlb_gpu[gpu_ddr % TEST_GPU_NUM]);
        clk_set_rate(ddr_clk, test_tlb_ddr[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
        clk_set_rate(gpu_clk, test_tlb_gpu[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
#endif

        test_count++;
        freq_dvfs_tst_rate = test_tlb_rate[test_count % TEST_FRE_NUM] * 1000;
        FREQ_PRINTK_LOG("cpufreq_dvsf_tst,cpu set rate %d\n", freq_dvfs_tst_rate);
        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
        cpufreq_cpu_put(policy);

        queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, msecs_to_jiffies(1000));
}
#endif /* CPU_FREQ_DVFS_TST */
233
234 /***********************************************************************/
235 static int rk30_verify_speed(struct cpufreq_policy *policy)
236 {
237         if (!freq_table)
238                 return -EINVAL;
239         return cpufreq_frequency_table_verify(policy, freq_table);
240 }
241 static int rk30_cpu_init(struct cpufreq_policy *policy)
242 {
243         if (policy->cpu == 0) {
244                 int i;
245                 
246                 gpu_clk = clk_get(NULL, "gpu");
247                 if (!IS_ERR(gpu_clk))
248                         clk_enable_dvfs(gpu_clk);
249
250                 ddr_clk = clk_get(NULL, "ddr");
251                 if (!IS_ERR(ddr_clk))
252                         clk_enable_dvfs(ddr_clk);
253                 
254                 cpu_clk = clk_get(NULL, "cpu");
255                 
256                 cpu_pll = clk_get(NULL, "arm_pll");
257                 
258                 cpu_gpll = clk_get(NULL, "arm_gpll");
259                 if (IS_ERR(cpu_clk))
260                         return PTR_ERR(cpu_clk);
261
262                 dvfs_clk_register_set_rate_callback(cpu_clk, cpufreq_scale_rate_for_dvfs);
263                 freq_table = dvfs_get_freq_volt_table(cpu_clk);
264                 if (freq_table == NULL) {
265                         freq_table = default_freq_table;
266                 }
267                 max_freq = freq_table[0].frequency;
268                 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
269                         max_freq = max(max_freq, freq_table[i].frequency);
270                 }
271                 clk_enable_dvfs(cpu_clk);
272
273 #if !defined(CONFIG_ARCH_RK3066B)
274 #if defined(CONFIG_ARCH_RK30)
275                 /* Limit gpu frequency between 133M to 400M */
276                 dvfs_clk_enable_limit(gpu_clk, 133000000, 400000000);
277 #endif
278 #endif
279
280                 freq_wq = create_singlethread_workqueue("rk30_cpufreqd");
281 #ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
282                 if (rk30_cpufreq_is_ondemand_policy(policy)) {
283                         queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, 0*HZ);
284                 }
285                 cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
286 #endif
287 #ifdef CPU_FREQ_DVFS_TST
288                 queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, msecs_to_jiffies(20 * 1000));
289 #endif
290         }
291         //set freq min max
292         cpufreq_frequency_table_cpuinfo(policy, freq_table);
293         //sys nod
294         cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
295
296         policy->cur = rk30_getspeed(0);
297
298         policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;        // make ondemand default sampling_rate to 40000
299
300         /*
301          * On rk30 SMP configuartion, both processors share the voltage
302          * and clock. So both CPUs needs to be scaled together and hence
303          * needs software co-ordination. Use cpufreq affected_cpus
304          * interface to handle this scenario. Additional is_smp() check
305          * is to keep SMP_ON_UP build working.
306          */
307         if (is_smp())
308                 cpumask_setall(policy->cpus);
309
310         return 0;
311 }
312
313 static int rk30_cpu_exit(struct cpufreq_policy *policy)
314 {
315         if (policy->cpu != 0)
316                 return 0;
317
318         cpufreq_frequency_table_cpuinfo(policy, freq_table);
319         clk_put(cpu_clk);
320 #ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
321         cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
322         if (freq_wq)
323                 cancel_delayed_work(&rk30_cpufreq_temp_limit_work);
324 #endif
325         if (freq_wq) {
326                 flush_workqueue(freq_wq);
327                 destroy_workqueue(freq_wq);
328                 freq_wq = NULL;
329         }
330
331         return 0;
332 }
333
/* sysfs attributes exported per policy (scaling_available_frequencies). */
static struct freq_attr *rk30_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};
338
/**************************earlysuspend freeze cpu frequency******************************/
static struct early_suspend ff_early_suspend;

/* sysfs paths used to drive cpufreq from kernel space on screen off/on. */
#define FILE_GOV_MODE "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"
#define FILE_SETSPEED "/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed"
#define FILE_CUR_FREQ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"

#define FF_DEBUG(fmt, args...) printk(KERN_DEBUG "FREEZE FREQ DEBUG:\t"fmt, ##args)
#define FF_ERROR(fmt, args...) printk(KERN_ERR "FREEZE FREQ ERROR:\t"fmt, ##args)
348
349 static int ff_read(char *file_path, char *buf)
350 {
351         struct file *file = NULL;
352         mm_segment_t old_fs;
353         loff_t offset = 0;
354
355         FF_DEBUG("read %s\n", file_path);
356         file = filp_open(file_path, O_RDONLY, 0);
357
358         if (IS_ERR(file)) {
359                 FF_ERROR("%s error open file  %s\n", __func__, file_path);
360                 return -1;
361         }
362
363         old_fs = get_fs();
364         set_fs(KERNEL_DS);
365
366         file->f_op->read(file, (char *)buf, 32, &offset);
367         sscanf(buf, "%s", buf);
368
369         set_fs(old_fs);
370         filp_close(file, NULL);  
371
372         file = NULL;
373
374         return 0;
375
376 }
377
378 static int ff_write(char *file_path, char *buf)
379 {
380         struct file *file = NULL;
381         mm_segment_t old_fs;
382         loff_t offset = 0;
383
384         FF_DEBUG("write %s %s size = %d\n", file_path, buf, strlen(buf));
385         file = filp_open(file_path, O_RDWR, 0);
386
387         if (IS_ERR(file)) {
388                 FF_ERROR("%s error open file  %s\n", __func__, file_path);
389                 return -1;
390         }
391
392         old_fs = get_fs();
393         set_fs(KERNEL_DS);
394
395         file->f_op->write(file, (char *)buf, strlen(buf), &offset);
396
397         set_fs(old_fs);
398         filp_close(file, NULL);  
399
400         file = NULL;
401
402         return 0;
403
404 }
405
/*
 * Force the named DVFS regulator to a fixed voltage (uV).
 * Failures are logged but not propagated.  (Name kept as-is, typo and
 * all, to preserve the interface.)
 */
static void ff_scale_votlage(char *name, int volt)
{
        struct regulator *reg;
        int err;

        FF_DEBUG("enter %s\n", __func__);

        reg = dvfs_get_regulator(name);
        if (!reg) {
                FF_ERROR("get regulator %s ERROR\n", name);
                return ;
        }

        err = regulator_set_voltage(reg, volt, volt);
        if (err)
                FF_ERROR("set voltage error %s %d, ret = %d\n", name, volt, err);
}
int clk_set_parent_force(struct clk *clk, struct clk *parent);
/*
 * Early-suspend hook: park the CPU at a low frequency while the screen is
 * off.  Switches the governor to "userspace" via sysfs, pins 252 MHz,
 * reparents the CPU clock onto the GPLL outside DVFS control, limits the
 * GPU, and takes CPU1 offline.  The clock/DVFS ordering below is
 * deliberate - reparent and set the rate before disabling DVFS.
 */
static void ff_early_suspend_func(struct early_suspend *h)
{
        char buf[32];
        FF_DEBUG("enter %s\n", __func__);
        if (ff_read(FILE_GOV_MODE, buf) != 0) {
                FF_ERROR("read current governor error\n");
                return ;
        } else {
                FF_DEBUG("current governor = %s\n", buf);
        }

        /* Hand control to the userspace governor so scaling_setspeed works. */
        strcpy(buf, "userspace");
        if (ff_write(FILE_GOV_MODE, buf) != 0) {
                FF_ERROR("set current governor error\n");
                return ;
        }

        strcpy(buf, "252000");
        if (ff_write(FILE_SETSPEED, buf) != 0) {
                FF_ERROR("set speed to 252MHz error\n");
                return ;
        }
        
        /* Park the CPU clock on the fixed GPLL and drop out of DVFS so the
         * rate stays frozen for the whole suspend window. */
        if (!IS_ERR(cpu_pll)&&!IS_ERR(cpu_gpll)&&!IS_ERR(cpu_clk))
        {
                clk_set_parent_force(cpu_clk,cpu_gpll);
                clk_set_rate(cpu_clk,300*1000*1000);
                
                clk_disable_dvfs(cpu_clk);
        }       
        /* Cap the GPU to 75-133 MHz while the screen is off. */
        if (!IS_ERR(gpu_clk))
                dvfs_clk_enable_limit(gpu_clk,75*1000*1000,133*1000*1000);
        
        //ff_scale_votlage("vdd_cpu", 1000000);
        //ff_scale_votlage("vdd_core", 1000000);
#ifdef CONFIG_HOTPLUG_CPU
        /* One core is enough for screen-off workloads. */
        cpu_down(1);
#endif
}
464
/*
 * Early-resume hook: undo ff_early_suspend_func().  Reparents the CPU
 * clock back onto the ARM PLL, re-enables DVFS, lifts the GPU limit,
 * brings CPU1 back up, then restores the "interactive" governor via
 * sysfs.
 */
static void ff_early_resume_func(struct early_suspend *h)
{
        char buf[32];
        FF_DEBUG("enter %s\n", __func__);

        /* Reverse of suspend: back onto the ARM PLL, DVFS re-enabled. */
        if (!IS_ERR(cpu_pll)&&!IS_ERR(cpu_gpll)&&!IS_ERR(cpu_clk))
        {
                clk_set_parent_force(cpu_clk,cpu_pll);
                clk_set_rate(cpu_clk,300*1000*1000);
                clk_enable_dvfs(cpu_clk);
        }       
        
        if (!IS_ERR(gpu_clk))
                dvfs_clk_disable_limit(gpu_clk);
#ifdef CONFIG_HOTPLUG_CPU
        cpu_up(1);
#endif
        if (ff_read(FILE_GOV_MODE, buf) != 0) {
                FF_ERROR("read current governor error\n");
                return ;
        } else {
                FF_DEBUG("current governor = %s\n", buf);
        }

        if (ff_read(FILE_CUR_FREQ, buf) != 0) {
                FF_ERROR("read current frequency error\n");
                return ;
        } else {
                FF_DEBUG("current frequency = %s\n", buf);
        }

        strcpy(buf, "interactive");
        if (ff_write(FILE_GOV_MODE, buf) != 0) {
                FF_ERROR("set current governor error\n");
                return ;
        }
        
        /* NOTE(review): the governor is written twice back-to-back; this
         * looks like a copy-paste (or a deliberate retry) - confirm
         * whether a single write suffices before removing it. */
        strcpy(buf, "interactive");
        if (ff_write(FILE_GOV_MODE, buf) != 0) {
                FF_ERROR("set current governor error\n");
                return ;
        }
}
508
509 static int __init ff_init(void)
510 {
511         FF_DEBUG("enter %s\n", __func__);
512 #ifdef CONFIG_HAS_EARLYSUSPEND
513         ff_early_suspend.suspend = ff_early_suspend_func;
514         ff_early_suspend.resume = ff_early_resume_func;
515         ff_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 100;
516 #endif
517         register_early_suspend(&ff_early_suspend);
518         return 0;
519 }
520
/* Tear down the freeze-frequency earlysuspend hook.
 * NOTE(review): neither ff_init nor ff_exit is wired to an initcall or
 * module_exit in this file - presumably referenced elsewhere; verify. */
static void __exit ff_exit(void)
{
        FF_DEBUG("enter %s\n", __func__);
        unregister_early_suspend(&ff_early_suspend);
}
526
/**************************target freq******************************/
/*
 * Apply driver-internal policy on top of the governor's request and
 * return the (possibly reduced) target in kHz: a boot-time cap, the
 * thermal cap, and one-table-step ramping for demand-driven governors.
 * @is_private: request came from the thermal worker (CPUFREQ_PRIVATE),
 * so the last real governor target is re-used instead of @target_freq.
 */
static unsigned int cpufreq_scale_limt(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
{
        bool is_ondemand = rk30_cpufreq_is_ondemand_policy(policy);
        static bool is_booting = true;
        unsigned int i;

        if (!is_ondemand)
                goto out;

#if !defined(CONFIG_ARCH_RK3066B) && !defined(CONFIG_ARCH_RK3188)
        /* During the first 30 s of boot, cap >=1.6 GHz requests at 1416 MHz. */
        if (is_booting && target_freq >= 1600 * 1000) {
                s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
                if (boottime_ms > 30 * MSEC_PER_SEC) {
                        is_booting = false;
                } else {
                        target_freq = 1416 * 1000;
                }
        }
#endif

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        {
                /* Remember the governor's last real request so the thermal
                 * worker can re-evaluate it under a changed cap. */
                static unsigned int ondemand_target = TEMP_LIMIT_FREQ;
                if (is_private)
                        target_freq = ondemand_target;
                else
                        ondemand_target = target_freq;
        }

        /* Once at/above TEMP_LIMIT_FREQ, climb one table step per call
         * instead of jumping straight to the requested rate. */
        if (target_freq != policy->max && target_freq > policy->cur && policy->cur >= TEMP_LIMIT_FREQ) {
                if (cpufreq_frequency_table_target(policy, freq_table, policy->cur + 1, CPUFREQ_RELATION_L, &i) == 0) {
                        unsigned int f = freq_table[i].frequency;
                        if (f < target_freq) {
                                target_freq = f;
                        }
                }
        }
        /*
         * If the new frequency is more than the thermal max allowed
         * frequency, go ahead and scale the mpu device to proper frequency.
         * (temp_limt_freq is (unsigned)-1 when no cap is active, so this
         * min() is then a no-op.)
         */
        target_freq = min(target_freq, temp_limt_freq);
#endif
out:
#ifdef CPU_FREQ_DVFS_TST
        /* Self-test override: consume the pending test rate, if any. */
        if (freq_dvfs_tst_rate) {
                target_freq = freq_dvfs_tst_rate;
                freq_dvfs_tst_rate = 0;
        }
#endif
        return target_freq;
}
580
581 int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate, dvfs_set_rate_callback set_rate)
582 {
583         unsigned int i;
584         int ret = -EINVAL;
585         struct cpufreq_freqs freqs;
586
587         freqs.new = rate / 1000;
588         freqs.old = rk30_getspeed(0);
589
590         for_each_online_cpu(freqs.cpu) {
591                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
592         }
593         FREQ_PRINTK_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
594         ret = set_rate(clk, rate);
595
596 #ifdef CONFIG_SMP
597         /*
598          * Note that loops_per_jiffy is not updated on SMP systems in
599          * cpufreq driver. So, update the per-CPU loops_per_jiffy value
600          * on frequency transition. We need to update all dependent CPUs.
601          */
602         for_each_possible_cpu(i) {
603                 per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
604         }
605 #endif
606
607         freqs.new = rk30_getspeed(0);
608         /* notifiers */
609         for_each_online_cpu(freqs.cpu) {
610                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
611         }
612         return ret;
613
614 }
615
/*
 * cpufreq .target hook, serialized by cpufreq_mutex.  @relation may carry
 * the driver-private ENABLE/DISABLE_FURTHER_CPUFREQ and CPUFREQ_PRIVATE
 * flags on top of the standard relation bits.
 */
static int rk30_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
        unsigned int i, new_rate = 0;
        int ret = 0;
        bool is_private;

        if (!freq_table) {
                FREQ_PRINTK_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

        /* ENABLE is processed before the access check so the matching
         * "enable" request itself can unlock a disabled state. */
        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access--;
        if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
                pr_err("denied access to %s as it is disabled temporarily\n", __func__);
#endif
                ret = -EINVAL;
                goto out;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
        if (ret) {
                FREQ_PRINTK_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_rate = freq_table[i].frequency;
        /* Boot/thermal caps apply only to normal (unlocked) requests. */
        if (!no_cpufreq_access)
                new_rate = cpufreq_scale_limt(new_rate, policy, is_private);

        FREQ_PRINTK_LOG("cpufreq req=%u,new=%u(was=%u)\n", target_freq, new_rate, rk30_getspeed(0));
        if (new_rate == rk30_getspeed(0))
                goto out;
        /* clk_set_rate() lands in cpufreq_scale_rate_for_dvfs(), which
         * issues the PRE/POSTCHANGE notifications. */
        ret = clk_set_rate(cpu_clk, new_rate * 1000);
out:
        mutex_unlock(&cpufreq_mutex);
        FREQ_PRINTK_DBG("cpureq set rate (%u) end\n", new_rate);
        return ret;
}
663
664 static int rk30_cpufreq_pm_notifier_event(struct notifier_block *this,
665                                           unsigned long event, void *ptr)
666 {
667         int ret = NOTIFY_DONE;
668         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
669
670         if (!policy)
671                 return ret;
672
673         if (!rk30_cpufreq_is_ondemand_policy(policy))
674                 goto out;
675
676         switch (event) {
677         case PM_SUSPEND_PREPARE:
678                 ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
679                 if (ret < 0) {
680                         ret = NOTIFY_BAD;
681                         goto out;
682                 }
683                 ret = NOTIFY_OK;
684                 break;
685         case PM_POST_RESTORE:
686         case PM_POST_SUSPEND:
687                 cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
688                 ret = NOTIFY_OK;
689                 break;
690         }
691 out:
692         cpufreq_cpu_put(policy);
693         return ret;
694 }
695
696 static struct notifier_block rk30_cpufreq_pm_notifier = {
697         .notifier_call = rk30_cpufreq_pm_notifier_event,
698 };
699
700 static int rk30_cpufreq_reboot_notifier_event(struct notifier_block *this,
701                                               unsigned long event, void *ptr)
702 {
703         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
704
705         if (policy) {
706                 cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
707                 cpufreq_cpu_put(policy);
708         }
709
710         return NOTIFY_OK;
711 }
712
713 static struct notifier_block rk30_cpufreq_reboot_notifier = {
714         .notifier_call = rk30_cpufreq_reboot_notifier_event,
715 };
716
/* cpufreq driver operations.  CPUFREQ_CONST_LOOPS because
 * loops_per_jiffy is maintained manually in cpufreq_scale_rate_for_dvfs(). */
static struct cpufreq_driver rk30_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = rk30_verify_speed,
        .target = rk30_target,
        .get = rk30_getspeed,
        .init = rk30_cpu_init,
        .exit = rk30_cpu_exit,
        .name = "rk30",
        .attr = rk30_cpufreq_attr,
};
727
728 static int __init rk30_cpufreq_init(void)
729 {
730         register_pm_notifier(&rk30_cpufreq_pm_notifier);
731         register_reboot_notifier(&rk30_cpufreq_reboot_notifier);
732         return cpufreq_register_driver(&rk30_cpufreq_driver);
733 }
734
735 static void __exit rk30_cpufreq_exit(void)
736 {
737         cpufreq_unregister_driver(&rk30_cpufreq_driver);
738 }
739
740 MODULE_DESCRIPTION("cpufreq driver for rock chip rk30");
741 MODULE_LICENSE("GPL");
742 device_initcall(rk30_cpufreq_init);
743 module_exit(rk30_cpufreq_exit);