rk30: cpufreq: notify frequency change once
[firefly-linux-kernel-4.4.55.git] / arch / arm / mach-rk30 / cpufreq.c
1 /* arch/arm/mach-rk29/cpufreq.c
2  *
3  * Copyright (C) 2010, 2011 ROCKCHIP, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/clk.h>
16 #include <linux/cpufreq.h>
17 #include <linux/err.h>
18 #include <linux/init.h>
19 #include <linux/reboot.h>
20 #include <linux/suspend.h>
21 #include <linux/tick.h>
22 #include <linux/workqueue.h>
23 #include <asm/smp_plat.h>
24 #include <asm/cpu.h>
25 #include <mach/dvfs.h>
26 #include <linux/delay.h>
27 #include <linux/regulator/consumer.h>
28
29 #define FREQ_PRINTK_DBG(fmt, args...) {while(0);}//pr_debug(fmt, ## args)
30 #define FREQ_PRINTK_ERR(fmt, args...) pr_err(fmt, ## args)
31 #define FREQ_PRINTK_LOG(fmt, args...) pr_debug(fmt, ## args)
/* Frequency table index must be sequential starting at 0 */
/* Fallback OPP table used when dvfs_get_freq_volt_table() returns none.
 * .frequency is in kHz; .index appears to carry the voltage in uV
 * (816 MHz @ 1.08 V) — NOTE(review): confirm against mach/dvfs.h. */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 816*1000, .index  = 1080*1000},
        {.frequency = CPUFREQ_TABLE_END},
};
/* Active table; replaced by the board DVFS table in rk30_cpu_init(). */
static struct cpufreq_frequency_table *freq_table = default_freq_table;
38
39 /*********************************************************/
40
41 /* additional symantics for "relation" in cpufreq with pm */
42 #define DISABLE_FURTHER_CPUFREQ         0x10
43 #define ENABLE_FURTHER_CPUFREQ          0x20
44 #define MASK_FURTHER_CPUFREQ            0x30
45 /* With 0x00(NOCHANGE), it depends on the previous "further" status */
46 static int no_cpufreq_access;
47 static unsigned int suspend_freq = 816 * 1000;
48
49 #define NUM_CPUS        2
50 static struct workqueue_struct *freq_wq;
51 static struct clk *cpu_clk;
52 //static struct clk *gpu_clk;
53 //static struct clk *ddr_clk;
54 //static DEFINE_PER_CPU(unsigned int, target_rate);
55 static DEFINE_MUTEX(cpufreq_mutex);
56
57 static int cpufreq_scale_rate_for_dvfs(struct clk * clk,unsigned long rate,dvfs_set_rate_callback set_rate);
58
59 /*******************************************************/
60 static unsigned int rk30_getspeed(unsigned int cpu)
61 {
62         unsigned long rate;
63
64         if (cpu >= NR_CPUS)
65                 return 0;
66
67         rate = clk_get_rate(cpu_clk) / 1000;
68         return rate;
69 }
70 static bool rk30_cpufreq_is_ondemand_policy(struct cpufreq_policy *policy)
71 {
72         char c = 0;
73         if (policy && policy->governor)
74                 c = policy->governor->name[0];
75         return (c == 'o' || c == 'i' || c == 'c');
76 }
77
78 /**********************thermal limit**************************/
79 #define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
80
81 #ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
82 static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work);
83
84 static DECLARE_DELAYED_WORK(rk30_cpufreq_temp_limit_work, rk30_cpufreq_temp_limit_work_func);
85
static unsigned int temp_limt_freq = 0;
#define _TEMP_LIMIT_FREQ 816000
#define TEMP_NOR 55
#define TEMP_HOT 80

#define TEMP_NOR_DELAY 5000 /* ms between thermal polls at or below TEMP_NOR */
/*
 * Compute the delay in ms until the next thermal poll: poll 25 ms sooner
 * for every degree above TEMP_NOR, never less often than TEMP_NOR_DELAY.
 */
unsigned int get_detect_temp_dly(int temp)
{
        unsigned int dly = TEMP_NOR_DELAY;

        if (temp > TEMP_NOR) {
                unsigned int dec = (unsigned int)(temp - TEMP_NOR) * 25;
                /* clamp: the original subtraction could wrap the unsigned
                 * delay to a huge value on an absurd sensor reading */
                dly = (dec < dly) ? dly - dec : 0;
        }
        return dly;
} /* FIX: the original function was missing this closing brace */
100 extern int rk30_tsadc_get_temp(unsigned int chn);
101
102 #define get_cpu_thermal() rk30_tsadc_get_temp(0)
103 static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work)
104 {
105         struct cpufreq_policy *policy;
106         int temp_rd=0,avg_temp,i;       
107         avg_temp=get_cpu_thermal();
108         
109         for(i=0;i<4;i++)
110         {
111                 temp_rd=get_cpu_thermal();
112                 avg_temp=(avg_temp+temp_rd)>>1;
113         }
114         FREQ_PRINTK_LOG("cpu_thermal(%d)\n",temp_rd);
115         
116         if(avg_temp>TEMP_HOT)
117         {
118                 temp_limt_freq=_TEMP_LIMIT_FREQ;
119                 policy = cpufreq_cpu_get(0);
120                 FREQ_PRINTK_DBG("temp_limit set rate %d kHz\n",temp_limt_freq);
121                 cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
122                 cpufreq_cpu_put(policy);
123         }
124         else
125                 temp_limt_freq=0;
126         
127         queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, 
128                 msecs_to_jiffies(get_detect_temp_dly(avg_temp)));
129 }
130
131 static int rk30_cpufreq_notifier_policy(struct notifier_block *nb,
132                 unsigned long val, void *data)
133 {
134         struct cpufreq_policy *policy = data;
135
136         if (val != CPUFREQ_NOTIFY)
137                 return 0;
138
139         if (rk30_cpufreq_is_ondemand_policy(policy)) {
140                 FREQ_PRINTK_DBG("queue work\n");
141                 queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, msecs_to_jiffies(TEMP_NOR_DELAY));
142         } else {
143                 FREQ_PRINTK_DBG("cancel work\n");
144                 cancel_delayed_work(&rk30_cpufreq_temp_limit_work);
145         }
146
147         return 0;
148 }
149 static struct notifier_block notifier_policy_block = {
150         .notifier_call = rk30_cpufreq_notifier_policy
151 };
152 #endif
153 /************************************dvfs tst************************************/
154 //#define CPU_FREQ_DVFS_TST
155 #ifdef CPU_FREQ_DVFS_TST
156 static unsigned int freq_dvfs_tst_rate=0;
157 static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work);
158 static DECLARE_DELAYED_WORK(rk30_cpufreq_dvsf_tst_work, rk30_cpufreq_dvsf_tst_work_func);
159 static int test_count=0;
160 #define TEST_FRE_NUM 11
161 static int test_tlb_rate[TEST_FRE_NUM]={504,1008,504,1200,252,816,1416,252,1512,252,816};
162 //static int test_tlb_rate[TEST_FRE_NUM]={504,1008,504,1200,252,816,1416,126,1512,126,816};
163
164 #define TEST_GPU_NUM 3
165
166 static int test_tlb_gpu[TEST_GPU_NUM]={360,400,180};
167 static int test_tlb_ddr[TEST_GPU_NUM]={401,200,500};
168
169
170 static int gpu_ddr=0;
171
/* DVFS stress-test worker (only built when CPU_FREQ_DVFS_TST is defined,
 * which is commented out above): once a second, force the next frequency
 * from test_tlb_rate[] through the normal governor path. */
static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy=cpufreq_cpu_get(0);
        
        gpu_ddr++;

#if 0
        /* Disabled: would also cycle GPU/DDR clocks through the test tables. */
        FREQ_PRINTK_LOG("cpufreq_dvsf_tst,ddr%u,gpu%u\n",
        test_tlb_ddr[gpu_ddr%TEST_GPU_NUM],test_tlb_gpu[gpu_ddr%TEST_GPU_NUM]);
        clk_set_rate(ddr_clk,test_tlb_ddr[gpu_ddr%TEST_GPU_NUM]*1000*1000);
        clk_set_rate(gpu_clk,test_tlb_gpu[gpu_ddr%TEST_GPU_NUM]*1000*1000);
#endif

        test_count++;
        /* freq_dvfs_tst_rate is consumed one-shot by cpufreq_scale_limt(). */
        freq_dvfs_tst_rate=test_tlb_rate[test_count%TEST_FRE_NUM]*1000;
        FREQ_PRINTK_LOG("cpufreq_dvsf_tst,cpu set rate %d\n",freq_dvfs_tst_rate);
        /* NOTE(review): policy from cpufreq_cpu_get(0) is not NULL-checked. */
        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
        cpufreq_cpu_put(policy);

        /* Re-arm for the next test step in 1 s. */
        queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, 
                msecs_to_jiffies(1000));
}
194 #endif
195 /***********************************************************************/
196 static int rk30_verify_speed(struct cpufreq_policy *policy)
197 {
198         if (!freq_table)
199                 return -EINVAL;
200         return cpufreq_frequency_table_verify(policy, freq_table);
201 }
202
203 static int rk30_cpu_init(struct cpufreq_policy *policy)
204 {
205         if (policy->cpu == 0) {
206                 cpu_clk =clk_get(NULL, "cpu");
207                 if (IS_ERR(cpu_clk))
208                         return PTR_ERR(cpu_clk);
209
210                 dvfs_clk_register_set_rate_callback(cpu_clk,cpufreq_scale_rate_for_dvfs);
211                 freq_table=dvfs_get_freq_volt_table(cpu_clk);
212                 if(freq_table==NULL)
213                 {
214                         freq_table=default_freq_table;
215                 }
216                 clk_enable_dvfs(cpu_clk);
217
218                 freq_wq = create_singlethread_workqueue("rk30_cpufreqd");
219 #ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
220                 if (rk30_cpufreq_is_ondemand_policy(policy)) 
221                 {
222                         queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, msecs_to_jiffies(TEMP_NOR_DELAY));
223                 }
224                 cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
225 #endif
226 #ifdef CPU_FREQ_DVFS_TST
227                 queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, msecs_to_jiffies(20*1000));
228 #endif
229         }
230
231         //set freq min max
232         cpufreq_frequency_table_cpuinfo(policy, freq_table);
233         //sys nod
234         cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
235         
236         policy->cur = rk30_getspeed(0);
237
238         policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; // make ondemand default sampling_rate to 40000
239
240
241         /*
242          * On rk30 SMP configuartion, both processors share the voltage
243          * and clock. So both CPUs needs to be scaled together and hence
244          * needs software co-ordination. Use cpufreq affected_cpus
245          * interface to handle this scenario. Additional is_smp() check
246          * is to keep SMP_ON_UP build working.
247          */
248         if (is_smp()) 
249                 cpumask_setall(policy->cpus);
250
251         return 0;
252 }
253
254 static int rk30_cpu_exit(struct cpufreq_policy *policy)
255 {
256         if (policy->cpu != 0)
257                 return 0;
258         cpufreq_frequency_table_cpuinfo(policy, freq_table);
259         clk_put(cpu_clk);
260 #ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
261         cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
262         if (freq_wq)
263                 cancel_delayed_work(&rk30_cpufreq_temp_limit_work);
264 #endif
265         if (freq_wq) {
266                 flush_workqueue(freq_wq);
267                 destroy_workqueue(freq_wq);
268                 freq_wq = NULL;
269         }
270         return 0;
271 }
272
273 static struct freq_attr *rk30_cpufreq_attr[] = {
274         &cpufreq_freq_attr_scaling_available_freqs,
275         NULL,
276 };
277
278 /**************************target freq******************************/
/* Apply the runtime caps on top of a table-selected frequency:
 * the thermal limit (dynamic governors only) and, in test builds,
 * a pending one-shot DVFS test rate. */
static unsigned int cpufreq_scale_limt(unsigned int target_freq, struct cpufreq_policy *policy)
{
        unsigned int freq = target_freq;

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        /*
         * If the request exceeds the thermal maximum, scale the CPU to
         * the limited frequency instead.
         */
        if (temp_limt_freq && rk30_cpufreq_is_ondemand_policy(policy) &&
            freq > temp_limt_freq)
                freq = temp_limt_freq;
#endif
#ifdef CPU_FREQ_DVFS_TST
        /* A queued test rate overrides everything, then clears itself. */
        if (freq_dvfs_tst_rate) {
                freq = freq_dvfs_tst_rate;
                freq_dvfs_tst_rate = 0;
        }
#endif
        return freq;
}
301
302 int cpufreq_scale_rate_for_dvfs(struct clk * clk,unsigned long rate,dvfs_set_rate_callback set_rate)
303 {
304         unsigned int i;
305         int ret=-EINVAL;
306         struct cpufreq_freqs freqs;
307
308         freqs.cpu = 0;
309         freqs.old = rk30_getspeed(0);
310         freqs.new = rate/1000;
311
312         cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
313
314         FREQ_PRINTK_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n",rate);
315         ret = set_rate(clk,rate);
316
317 #if CONFIG_SMP
318         /*
319          * Note that loops_per_jiffy is not updated on SMP systems in
320          * cpufreq driver. So, update the per-CPU loops_per_jiffy value
321          * on frequency transition. We need to update all dependent CPUs.
322          */
323         for_each_possible_cpu(i) {
324                 per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
325         }
326 #endif
327
328         freqs.new = rk30_getspeed(0);
329
330         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
331
332         return ret;
333
334 }
/*
 * cpufreq ->target hook. "relation" may carry the additional
 * DISABLE/ENABLE_FURTHER_CPUFREQ flags (defined above) that the PM and
 * reboot notifiers use to freeze/unfreeze frequency changes around
 * suspend and reboot; they are masked off before the table lookup.
 */
static int rk30_target(struct cpufreq_policy *policy,
                       unsigned int target_freq,
                       unsigned int relation)
{
        unsigned int i,new_rate=0;
        int ret = 0;

        if (!freq_table) {
                FREQ_PRINTK_ERR("no freq table!\n");
                return -EINVAL;
        }
        
        /* Decrement before the access check so the unfreezing request
         * itself is allowed through. */
        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access--;
        if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
                pr_err("denied access to %s as it is disabled temporarily\n", __func__);
#endif
                return -EINVAL;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        mutex_lock(&cpufreq_mutex);
        
        /* Snap the request to an entry of the active table. */
        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
                        relation, &i);
        
        if (ret) {
                FREQ_PRINTK_ERR("no freq match for %d(ret=%d)\n",target_freq, ret);
                goto out;
        }
        //FREQ_PRINTK_DBG("%s,tlb rate%u(req %u)*****\n",__func__,freq_table[i].frequency,target_freq);
        //new_rate=freq_table[i].frequency;
        /* Apply thermal/test caps on top of the table rate. */
        new_rate=cpufreq_scale_limt(freq_table[i].frequency,policy);
        
        FREQ_PRINTK_LOG("cpufreq req=%u,new=%u(was=%u)\n",target_freq,new_rate,rk30_getspeed(0));
        if (new_rate == rk30_getspeed(0))
                goto out;
        /* clk_set_rate() reaches cpufreq_scale_rate_for_dvfs() via the DVFS
         * callback, which issues the PRE/POSTCHANGE notifications. */
        ret = clk_set_rate(cpu_clk,new_rate*1000);
out:
        mutex_unlock(&cpufreq_mutex);   
        FREQ_PRINTK_DBG("cpureq set rate (%u) end\n",new_rate);
        return ret;
}
381
/*
 * PM notifier: pin the CPU at suspend_freq and block further cpufreq
 * requests across suspend; re-enable them on resume/restore. Applies
 * only under dynamic governors (ondemand/interactive/conservative).
 */
static int rk30_cpufreq_pm_notifier_event(struct notifier_block *this,
                unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (!policy)
                return ret;

        if (!rk30_cpufreq_is_ondemand_policy(policy))
                goto out;

        switch (event) {
                case PM_SUSPEND_PREPARE:
                        /* DISABLE_FURTHER_CPUFREQ makes rk30_target() reject
                         * every later request until re-enabled below. */
                        ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                        if (ret < 0) {
                                ret = NOTIFY_BAD;
                                goto out;
                        }
                        ret = NOTIFY_OK;
                        break;
                case PM_POST_RESTORE:
                case PM_POST_SUSPEND:
                        /* NOTE(review): this re-enables cpufreq but still
                         * targets suspend_freq; the governor presumably
                         * rescales shortly after — confirm intended. */
                        cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                        ret = NOTIFY_OK;
                        break;
        }
out:
        cpufreq_cpu_put(policy);
        return ret;
}
413
414 static struct notifier_block rk30_cpufreq_pm_notifier = {
415         .notifier_call = rk30_cpufreq_pm_notifier_event,
416 };
417
418 static int rk30_cpufreq_reboot_notifier_event(struct notifier_block *this,
419                 unsigned long event, void *ptr)
420 {
421         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
422
423         if (policy) {
424                 cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
425                 cpufreq_cpu_put(policy);
426         }
427
428         return NOTIFY_OK;
429 }
430
431 static struct notifier_block rk30_cpufreq_reboot_notifier = {
432         .notifier_call = rk30_cpufreq_reboot_notifier_event,
433 };
434
/* cpufreq driver operations for rk30; rate changes flow through
 * rk30_target() -> clk_set_rate() -> the registered DVFS callback. */
static struct cpufreq_driver rk30_cpufreq_driver = {
        .verify         = rk30_verify_speed,
        .target         = rk30_target,
        .get            = rk30_getspeed,
        .init           = rk30_cpu_init,
        .exit           = rk30_cpu_exit,
        .name           = "rk30",
        .attr           = rk30_cpufreq_attr,
};
444
445 static int __init rk30_cpufreq_init(void)
446 {
447         register_pm_notifier(&rk30_cpufreq_pm_notifier);
448         register_reboot_notifier(&rk30_cpufreq_reboot_notifier);
449
450         return cpufreq_register_driver(&rk30_cpufreq_driver);
451 }
452
453 static void __exit rk30_cpufreq_exit(void)
454 {
455         cpufreq_unregister_driver(&rk30_cpufreq_driver);
456 }
457
458
459 MODULE_DESCRIPTION("cpufreq driver for rock chip rk30");
460 MODULE_LICENSE("GPL");
461 device_initcall(rk30_cpufreq_init);
462 module_exit(rk30_cpufreq_exit);