rk30: add cpufreq.c
[firefly-linux-kernel-4.4.55.git] arch/arm/mach-rk30/cpufreq.c
/* arch/arm/mach-rk30/cpufreq.c
 *
 * Copyright (C) 2010, 2011 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>
#include <mach/dvfs.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>

#define FREQ_PRINTK_DBG(fmt, args...) do {} while (0)  /* pr_debug(fmt, ## args) */
#define FREQ_PRINTK_ERR(fmt, args...) pr_err(fmt, ## args)
#define FREQ_PRINTK_LOG(fmt, args...) printk(fmt, ## args)
/* Frequency table index must be sequential starting at 0 */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 816 * 1000, .index = 1080 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};
static struct cpufreq_frequency_table *freq_table = default_freq_table;

/*********************************************************/
#define NUM_CPUS        2
static struct workqueue_struct *freq_wq;
static struct clk *cpu_clk;
static struct clk *gpu_clk;
static struct clk *ddr_clk;
static DEFINE_PER_CPU(unsigned int, target_rate);
static DEFINE_MUTEX(cpufreq_mutex);
//static DEFINE_PER_CPU(unsigned int, target_rate);

static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate, dvfs_set_rate_callback set_rate);

/*******************************************************/
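/* Return the current CPU clock rate in kHz (0 for an out-of-range cpu index). */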
static unsigned int rk30_getspeed(unsigned int cpu)
{
        unsigned long rate;

        if (cpu >= NR_CPUS)
                return 0;

        rate = clk_get_rate(cpu_clk) / 1000;
        return rate;
}
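/*
 * Heuristic governor check: treat the policy as "ondemand-like" when the
 * governor name starts with 'o', 'i' or 'c' (e.g. ondemand, interactive,
 * conservative), i.e. a governor that rescales the frequency on its own.
 */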
static bool rk30_cpufreq_is_ondemand_policy(struct cpufreq_policy *policy)
{
        char c = 0;
        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c');
}

/**********************thermal limit**************************/
#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work);

static DECLARE_DELAYED_WORK(rk30_cpufreq_temp_limit_work, rk30_cpufreq_temp_limit_work_func);

static unsigned int temp_limt_freq = 0;
#define _TEMP_LIMIT_FREQ 816000
#define TEMP_NOR 55
#define TEMP_HOT 80

#define TEMP_NOR_DELAY 5000     /* 5 s */
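/*
 * Temperature polling interval: nominally TEMP_NOR_DELAY ms, shortened by
 * 25 ms for every degree the reading exceeds TEMP_NOR, so a hot chip is
 * sampled more often.
 */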
unsigned int get_detect_temp_dly(int temp)
{
        unsigned int dly = TEMP_NOR_DELAY;
        if (temp > TEMP_NOR)
                dly -= (temp - TEMP_NOR) * 25;
        //FREQ_PRINTK_DBG("cpu_thermal delay(%d)\n",dly);
        return dly;
}

int rk30_tsadc_get_temp(unsigned int chn);

#define get_cpu_thermal() rk30_tsadc_get_temp(0)
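/*
 * Delayed work: average several TSADC readings, latch temp_limt_freq when the
 * average exceeds TEMP_HOT (cleared again once it drops), kick the governor so
 * the limit takes effect, then re-arm itself with a temperature-dependent delay.
 */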
static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy;
        int temp_rd = 0, avg_temp, i;

        avg_temp = get_cpu_thermal();
        for (i = 0; i < 4; i++) {
                temp_rd = get_cpu_thermal();
                avg_temp = (avg_temp + temp_rd) >> 1;
        }
        FREQ_PRINTK_LOG("cpu_thermal(%d)\n", temp_rd);

        if (avg_temp > TEMP_HOT) {
                temp_limt_freq = _TEMP_LIMIT_FREQ;
                policy = cpufreq_cpu_get(0);
                if (policy) {
                        FREQ_PRINTK_DBG("temp_limit set rate %d kHz\n", temp_limt_freq);
                        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
                        cpufreq_cpu_put(policy);
                }
        } else {
                temp_limt_freq = 0;
        }

        queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work,
                msecs_to_jiffies(get_detect_temp_dly(avg_temp)));
}

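/*
 * CPUFREQ_POLICY_NOTIFIER callback: (re)start the thermal polling work when an
 * ondemand-like governor is in control, cancel it otherwise.
 */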
static int rk30_cpufreq_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        if (rk30_cpufreq_is_ondemand_policy(policy)) {
                FREQ_PRINTK_DBG("queue work\n");
                queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, msecs_to_jiffies(TEMP_NOR_DELAY));
        } else {
                FREQ_PRINTK_DBG("cancel work\n");
                cancel_delayed_work(&rk30_cpufreq_temp_limit_work);
        }

        return 0;
}
static struct notifier_block notifier_policy_block = {
        .notifier_call = rk30_cpufreq_notifier_policy
};
#endif
/************************************dvfs tst************************************/
//#define CPU_FREQ_DVFS_TST
#ifdef CPU_FREQ_DVFS_TST
static unsigned int freq_dvfs_tst_rate = 0;
static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(rk30_cpufreq_dvsf_tst_work, rk30_cpufreq_dvsf_tst_work_func);
static int test_count = 0;
#define TEST_FRE_NUM 11
static int test_tlb_rate[TEST_FRE_NUM] = {504, 1008, 504, 1200, 252, 816, 1416, 252, 1512, 252, 816};
//static int test_tlb_rate[TEST_FRE_NUM]={504,1008,504,1200,252,816,1416,126,1512,126,816};

#define TEST_GPU_NUM 3

static int test_tlb_gpu[TEST_GPU_NUM] = {360, 400, 180};
static int test_tlb_ddr[TEST_GPU_NUM] = {401, 200, 500};

static int gpu_ddr = 0;

static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        gpu_ddr++;

#if 0
        FREQ_PRINTK_LOG("cpufreq_dvsf_tst,ddr%u,gpu%u\n",
                test_tlb_ddr[gpu_ddr % TEST_GPU_NUM], test_tlb_gpu[gpu_ddr % TEST_GPU_NUM]);
        clk_set_rate(ddr_clk, test_tlb_ddr[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
        clk_set_rate(gpu_clk, test_tlb_gpu[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
#endif

        test_count++;
        freq_dvfs_tst_rate = test_tlb_rate[test_count % TEST_FRE_NUM] * 1000;
        FREQ_PRINTK_LOG("cpufreq_dvsf_tst,cpu set rate %d\n", freq_dvfs_tst_rate);
        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
        cpufreq_cpu_put(policy);

        queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work,
                msecs_to_jiffies(1000));
}
#endif
/***********************************************************************/
static int rk30_verify_speed(struct cpufreq_policy *policy)
{
        if (!freq_table)
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table);
}

static unsigned long _clk_loops_rate_ref;
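/*
 * Set the CPU clock to the given rate (in kHz), recalibrate loops_per_jiffy at
 * that speed and remember the rate actually achieved in _clk_loops_rate_ref.
 * Not wired into the transition path in this version of the driver.
 */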
static void calc_lpj_ref_get(int kmz)
{
        clk_set_rate(cpu_clk, kmz * 1000);
        calibrate_delay();
        _clk_loops_rate_ref = clk_get_rate(cpu_clk);
        FREQ_PRINTK_DBG("****loops_per_jiffy=%lu,rate=%lu\n", loops_per_jiffy, _clk_loops_rate_ref);
}

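/*
 * Driver init for a policy: grab the "cpu" clock, hook the DVFS set-rate
 * callback, fetch the frequency/voltage table (falling back to the built-in
 * default), enable DVFS on the clock, create the workqueue used by the
 * thermal limit work, and fill in the cpufreq policy (cpuinfo limits,
 * current frequency, transition latency, shared cpus on SMP).
 */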
static int rk30_cpu_init(struct cpufreq_policy *policy)
{
        //printk("rk30_cpu_init\n");
        cpu_clk = clk_get(NULL, "cpu");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

#if 0
        gpu_clk = clk_get(NULL, "gpu");
        if (IS_ERR(gpu_clk))
                return PTR_ERR(gpu_clk);
        ddr_clk = clk_get(NULL, "ddr");
        if (IS_ERR(ddr_clk))
                return PTR_ERR(ddr_clk);
        regulator_set_voltage(regulator_get(NULL, "vdd_core"), 1125 * 1000, 1125 * 1000);
        //clk_enable_dvfs(ddr_clk);
        //clk_enable_dvfs(gpu_clk);
#endif
        dvfs_clk_register_set_rate_callback(cpu_clk, cpufreq_scale_rate_for_dvfs);
        freq_table = dvfs_get_freq_volt_table(cpu_clk);
        if (freq_table == NULL)
                freq_table = default_freq_table;
        clk_enable_dvfs(cpu_clk);

        freq_wq = create_singlethread_workqueue("rk29_cpufreqd");
#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        if (rk30_cpufreq_is_ondemand_policy(policy))
                queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, msecs_to_jiffies(TEMP_NOR_DELAY));
        cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
#endif
#ifdef CPU_FREQ_DVFS_TST
        queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, msecs_to_jiffies(20 * 1000));
#endif

        //set freq min max
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        //sysfs node
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

        policy->cur = rk30_getspeed(0);

        /* FIXME: what's the actual transition time? */
        policy->cpuinfo.transition_latency = 300 * 1000;

        /*
         * On the rk30 SMP configuration, both processors share the voltage
         * and clock, so both CPUs need to be scaled together and hence
         * need software co-ordination. Use the cpufreq affected_cpus
         * interface to handle this scenario. The additional is_smp() check
         * is to keep the SMP_ON_UP build working.
         */
#if 0
        policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
        cpumask_copy(policy->related_cpus, cpu_possible_mask);
#else
        if (is_smp()) {
                //policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
                cpumask_setall(policy->cpus);
        }
#endif

        return 0;
}

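/*
 * Driver exit for a policy: release the CPU clock, unregister the policy
 * notifier, cancel any pending thermal work and destroy the workqueue.
 */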
static int rk30_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_put(cpu_clk);
#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
        if (freq_wq)
                cancel_delayed_work(&rk30_cpufreq_temp_limit_work);
#endif
        if (freq_wq) {
                flush_workqueue(freq_wq);
                destroy_workqueue(freq_wq);
                freq_wq = NULL;
        }
        return 0;
}

static struct freq_attr *rk30_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

/**************************target freq******************************/
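/*
 * Allow board code to install its own frequency table; taken under the
 * cpufreq mutex so it cannot race with an in-flight rk30_target() call.
 */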
int rk30_board_update_cpufreq_table(struct cpufreq_frequency_table *table)
{
        mutex_lock(&cpufreq_mutex);
        freq_table = table;
        mutex_unlock(&cpufreq_mutex);
        return 0;
}
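/*
 * Clamp the requested frequency: apply the thermal ceiling while the chip is
 * hot, and (when the DVFS test hook is compiled in) override the request with
 * the next test rate.
 */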
static unsigned int cpufreq_scale_limt(unsigned int target_freq, struct cpufreq_policy *policy)
{
        /*
         * If the new frequency is more than the thermal max allowed
         * frequency, go ahead and scale the mpu device to proper frequency.
         */
#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        if (rk30_cpufreq_is_ondemand_policy(policy) && temp_limt_freq) {
                printk("temp_limt_freq=%u\n", temp_limt_freq);
                target_freq = min(target_freq, temp_limt_freq);
        }
#endif
#ifdef CPU_FREQ_DVFS_TST
        if (freq_dvfs_tst_rate) {
                target_freq = freq_dvfs_tst_rate;
                freq_dvfs_tst_rate = 0;
        }
#endif
        return target_freq;
}

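/*
 * DVFS set-rate callback for the "cpu" clock: bracket the underlying rate
 * change with CPUFREQ_PRECHANGE/CPUFREQ_POSTCHANGE notifications for every
 * online CPU so governors and listeners see a consistent transition.
 */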
static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate, dvfs_set_rate_callback set_rate)
{
        int ret = -EINVAL;
        struct cpufreq_freqs freqs;

        freqs.new = rate / 1000;
        freqs.old = rk30_getspeed(0);

        get_online_cpus();
        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        FREQ_PRINTK_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
        ret = set_rate(clk, rate);

#if 0//CONFIG_SMP
        /*
         * Note that loops_per_jiffy is not updated on SMP systems in
         * cpufreq driver. So, update the per-CPU loops_per_jiffy value
         * on frequency transition. We need to update all dependent CPUs.
         */
        for_each_possible_cpu(i) {
                per_cpu(cpu_data, i).loops_per_jiffy =
                        cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
        }
#endif

        freqs.old = freqs.new;
        freqs.new = rk30_getspeed(0);
        /* notifiers */
        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        put_online_cpus();
        return ret;
}
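/*
 * cpufreq ->target hook: pick the closest table entry for the request, apply
 * the thermal/test limits, and set the new CPU clock rate, all under the
 * cpufreq mutex. The rate change funnels back through
 * cpufreq_scale_rate_for_dvfs() via the DVFS layer.
 */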
static int rk30_target(struct cpufreq_policy *policy,
                       unsigned int target_freq,
                       unsigned int relation)
{
        unsigned int i, new_rate = 0;
        int ret = 0;

        if (!freq_table) {
                FREQ_PRINTK_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
                        relation, &i);
        if (ret) {
                FREQ_PRINTK_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
                goto out;
        }
        //FREQ_PRINTK_DBG("%s,tlb rate%u(req %u)*****\n",__func__,freq_table[i].frequency,target_freq);
        //new_rate=freq_table[i].frequency;
        new_rate = cpufreq_scale_limt(freq_table[i].frequency, policy);

        FREQ_PRINTK_LOG("cpufreq req=%u,new=%u(was=%u)\n", target_freq, new_rate, rk30_getspeed(0));
        if (new_rate == rk30_getspeed(0))
                goto out;
        ret = clk_set_rate(cpu_clk, new_rate * 1000);
out:
        mutex_unlock(&cpufreq_mutex);
        FREQ_PRINTK_DBG("cpufreq set rate (%u) end\n", new_rate);
        return ret;
}

static struct cpufreq_driver rk30_cpufreq_driver = {
        .verify         = rk30_verify_speed,
        .target         = rk30_target,
        .get            = rk30_getspeed,
        .init           = rk30_cpu_init,
        .exit           = rk30_cpu_exit,
        .name           = "rk30",
        .attr           = rk30_cpufreq_attr,
};

static int __init rk30_cpufreq_init(void)
{
        return cpufreq_register_driver(&rk30_cpufreq_driver);
}

static void __exit rk30_cpufreq_exit(void)
{
        cpufreq_unregister_driver(&rk30_cpufreq_driver);
}

MODULE_AUTHOR("Colin Cross <ccross@android.com>");
MODULE_DESCRIPTION("cpufreq driver for rock chip rk30");
MODULE_LICENSE("GPL");
module_init(rk30_cpufreq_init);
module_exit(rk30_cpufreq_exit);