2 * Copyright (C) 2012-2013 ROCKCHIP, Inc.
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #define pr_fmt(fmt) "cpufreq: " fmt
16 #include <linux/clk.h>
17 #include <linux/cpufreq.h>
18 #include <linux/err.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/init.h>
21 #include <linux/reboot.h>
22 #include <linux/suspend.h>
23 #include <linux/tick.h>
24 #include <linux/workqueue.h>
25 #include <asm/smp_plat.h>
27 #include <mach/dvfs.h>
28 #include <linux/delay.h>
29 #include <linux/regulator/consumer.h>
31 #include <linux/string.h>
32 #include <linux/earlysuspend.h>
33 #include <asm/unistd.h>
34 #include <asm/uaccess.h>
36 #include <linux/cpu.h>
/*
 * Verbose frequency logging: FREQ_PRINTK_DBG/LOG compile to pr_debug()
 * only when DEBUG is defined, and to nothing otherwise.  Errors are
 * always printed.  (The original conditional was lost; without it the
 * DBG/LOG macros were defined twice, which is a redefinition error.)
 */
#ifdef DEBUG
#define FREQ_PRINTK_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_PRINTK_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_PRINTK_DBG(fmt, args...) do {} while (0)
#define FREQ_PRINTK_LOG(fmt, args...) do {} while (0)
#endif

#define FREQ_PRINTK_ERR(fmt, args...) pr_err(fmt, ## args)
46 /* Frequency table index must be sequential starting at 0 */
47 static struct cpufreq_frequency_table default_freq_table[] = {
48 {.frequency = 816 * 1000, .index = 1100 * 1000},
49 {.frequency = CPUFREQ_TABLE_END},
52 static struct cpufreq_frequency_table *freq_table = default_freq_table;
53 static unsigned int max_freq = -1;
55 /*********************************************************/
57 /* additional symantics for "relation" in cpufreq with pm */
58 #define DISABLE_FURTHER_CPUFREQ 0x10
59 #define ENABLE_FURTHER_CPUFREQ 0x20
60 #define MASK_FURTHER_CPUFREQ 0x30
61 /* With 0x00(NOCHANGE), it depends on the previous "further" status */
62 #define CPUFREQ_PRIVATE 0x100
63 static int no_cpufreq_access;
64 static unsigned int suspend_freq = 816 * 1000;
66 static struct workqueue_struct *freq_wq;
67 static struct clk *cpu_clk;
68 static struct clk *cpu_pll;
69 static struct clk *cpu_gpll;
72 static DEFINE_MUTEX(cpufreq_mutex);
74 static struct clk *gpu_clk;
75 static struct clk *ddr_clk;
76 #if !defined(CONFIG_ARCH_RK3066B) && !defined(CONFIG_ARCH_RK3188)
77 #define GPU_MAX_RATE 350*1000*1000
80 static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate, dvfs_set_rate_callback set_rate);
82 /*******************************************************/
83 static unsigned int rk30_getspeed(unsigned int cpu)
90 rate = clk_get_rate(cpu_clk) / 1000;
94 static bool rk30_cpufreq_is_ondemand_policy(struct cpufreq_policy *policy)
97 if (policy && policy->governor)
98 c = policy->governor->name[0];
99 return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
102 /**********************thermal limit**************************/
/* Thermal limiting is only wired up for plain RK30 (not RK3066B/RK3188). */
#if !defined(CONFIG_ARCH_RK3066B) && !defined(CONFIG_ARCH_RK3188)
#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
#endif
#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
/* Worker that polls the thermal sensor and caps the CPU frequency. */
static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work);

static DECLARE_DELAYED_WORK(rk30_cpufreq_temp_limit_work, rk30_cpufreq_temp_limit_work_func);

/* Current thermal cap in kHz; -1 (UINT_MAX) means "no cap".  Read-only via sysfs. */
static unsigned int temp_limt_freq = -1;
module_param(temp_limt_freq, uint, 0444);

/* Floor below which thermal limiting never pushes the CPU (kHz). */
#define TEMP_LIMIT_FREQ 816000
117 static const struct cpufreq_frequency_table temp_limits[] = {
118 {.frequency = 1416 * 1000, .index = 50},
119 {.frequency = 1200 * 1000, .index = 55},
120 {.frequency = 1008 * 1000, .index = 60},
121 {.frequency = 816 * 1000, .index = 75},
124 static const struct cpufreq_frequency_table temp_limits_high[] = {
125 {.frequency = 816 * 1000, .index = 100},
128 extern int rk30_tsadc_get_temp(unsigned int chn);
/*
 * Thermal-limit worker: sample the TSADC, pick a frequency cap from
 * temp_limits[] (or temp_limits_high[] when the GPU is clocked above
 * GPU_MAX_RATE but raised fewer than 3 new interrupts between the two
 * kstat_irqs() samples, i.e. looks idle), publish it in temp_limt_freq,
 * and nudge cpufreq so cpufreq_scale_limt() applies it.  Re-queues
 * itself on freq_wq every `delay` jiffies.
 * NOTE(review): extraction dropped lines here (locals `temp` and `i`,
 * several closing braces); not compilable as shown.
 */
static void rk30_cpufreq_temp_limit_work_func(struct work_struct *work)
	struct cpufreq_policy *policy;

	unsigned int new = -1;	/* candidate cap, kHz; -1 = uncapped */
	unsigned long delay = HZ;	/* poll period */
	const struct cpufreq_frequency_table *limits_table = temp_limits;
	size_t limits_size = ARRAY_SIZE(temp_limits);
	unsigned int gpu_irqs[2];
	gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);

	temp = rk30_tsadc_get_temp(0);
	FREQ_PRINTK_LOG("cpu_thermal(%d)\n", temp);

	gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
	if (clk_get_rate(gpu_clk) > GPU_MAX_RATE) {
		/* GPU fast but quiet -> assume CPU-heavy load, throttle harder */
		if ((gpu_irqs[1] - gpu_irqs[0]) < 3) {
			limits_table = temp_limits_high;
			limits_size = ARRAY_SIZE(temp_limits_high);

	/* last row whose threshold is exceeded wins */
	for (i = 0; i < limits_size; i++) {
		if (temp > limits_table[i].index) {
			new = limits_table[i].frequency;

	if (temp_limt_freq != new) {
		temp_limt_freq = new;
		FREQ_PRINTK_DBG("temp_limit set rate %d kHz\n", temp_limt_freq);
		/* retarget at the current frequency; the limit is applied in cpufreq_scale_limt() */
		policy = cpufreq_cpu_get(0);
		cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
		cpufreq_cpu_put(policy);

	queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, delay);
/*
 * cpufreq policy notifier: keep the thermal-limit worker running only
 * while a dynamic governor (see rk30_cpufreq_is_ondemand_policy()) is
 * active; cancel it otherwise.
 * NOTE(review): extraction dropped the early-return after the
 * CPUFREQ_NOTIFY check, the else branch, and the final return.
 */
static int rk30_cpufreq_notifier_policy(struct notifier_block *nb,
					unsigned long val, void *data)
	struct cpufreq_policy *policy = data;

	if (val != CPUFREQ_NOTIFY)

	if (rk30_cpufreq_is_ondemand_policy(policy)) {
		FREQ_PRINTK_DBG("queue work\n");
		queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, 0);
		/* (else branch) stop polling for static governors */
		FREQ_PRINTK_DBG("cancel work\n");
		cancel_delayed_work_sync(&rk30_cpufreq_temp_limit_work);
187 static struct notifier_block notifier_policy_block = {
188 .notifier_call = rk30_cpufreq_notifier_policy
/************************************dvfs tst************************************/
//#define CPU_FREQ_DVFS_TST
#ifdef CPU_FREQ_DVFS_TST
/* DVFS stress test: cycles CPU/GPU/DDR through canned rate tables every second. */
static unsigned int freq_dvfs_tst_rate;	/* next forced CPU rate, kHz; 0 = none */
static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(rk30_cpufreq_dvsf_tst_work, rk30_cpufreq_dvsf_tst_work_func);
static int test_count;
#define TEST_FRE_NUM 11
/* CPU test rates in MHz, indexed modulo TEST_FRE_NUM */
static int test_tlb_rate[TEST_FRE_NUM] = { 504, 1008, 504, 1200, 252, 816, 1416, 252, 1512, 252, 816 };
//static int test_tlb_rate[TEST_FRE_NUM]={504,1008,504,1200,252,816,1416,126,1512,126,816};

#define TEST_GPU_NUM 3

/* GPU/DDR test rates in MHz, indexed modulo TEST_GPU_NUM */
static int test_tlb_gpu[TEST_GPU_NUM] = { 360, 400, 180 };
static int test_tlb_ddr[TEST_GPU_NUM] = { 401, 200, 500 };

static int gpu_ddr = 0;
/*
 * Stress-test worker: every second program the next canned DDR and GPU
 * rates, then retarget the CPU at its current frequency — the forced
 * rate in freq_dvfs_tst_rate is picked up by cpufreq_scale_limt().
 * NOTE(review): extraction dropped lines (opening brace, the gpu_ddr /
 * test_count increments, closing braces).
 */
static void rk30_cpufreq_dvsf_tst_work_func(struct work_struct *work)
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	FREQ_PRINTK_LOG("cpufreq_dvsf_tst,ddr%u,gpu%u\n",
			test_tlb_ddr[gpu_ddr % TEST_GPU_NUM],
			test_tlb_gpu[gpu_ddr % TEST_GPU_NUM]);
	clk_set_rate(ddr_clk, test_tlb_ddr[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
	clk_set_rate(gpu_clk, test_tlb_gpu[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);

	freq_dvfs_tst_rate = test_tlb_rate[test_count % TEST_FRE_NUM] * 1000;
	FREQ_PRINTK_LOG("cpufreq_dvsf_tst,cpu set rate %d\n", freq_dvfs_tst_rate);
	cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
	cpufreq_cpu_put(policy);

	queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, msecs_to_jiffies(1000));
#endif /* CPU_FREQ_DVFS_TST */
234 /***********************************************************************/
235 static int rk30_verify_speed(struct cpufreq_policy *policy)
239 return cpufreq_frequency_table_verify(policy, freq_table);
/*
 * cpufreq .init hook.  Real set-up happens only for cpu0: acquire the
 * gpu/ddr/cpu clocks, register the DVFS set-rate callback, load the
 * frequency/voltage table (falling back to default_freq_table),
 * compute max_freq, create freq_wq and start the thermal-limit worker
 * and optional DVFS stress test.  Secondary CPUs share cpu0's clock
 * and voltage, expressed via cpumask_setall(policy->cpus).
 * NOTE(review): extraction dropped lines (local `i`, the IS_ERR guard
 * preceding the PTR_ERR return, closing braces, #endif lines, final
 * return); not compilable as shown.
 */
static int rk30_cpu_init(struct cpufreq_policy *policy)
	if (policy->cpu == 0) {

		gpu_clk = clk_get(NULL, "gpu");
		if (!IS_ERR(gpu_clk))
			clk_enable_dvfs(gpu_clk);

		ddr_clk = clk_get(NULL, "ddr");
		if (!IS_ERR(ddr_clk))
			clk_enable_dvfs(ddr_clk);

		cpu_clk = clk_get(NULL, "cpu");

		cpu_pll = clk_get(NULL, "arm_pll");

		cpu_gpll = clk_get(NULL, "arm_gpll");
			/* error path for clk_get("cpu"); its IS_ERR guard was lost */
			return PTR_ERR(cpu_clk);

		dvfs_clk_register_set_rate_callback(cpu_clk, cpufreq_scale_rate_for_dvfs);
		freq_table = dvfs_get_freq_volt_table(cpu_clk);
		if (freq_table == NULL) {
			freq_table = default_freq_table;
		/* track the fastest table entry for clamping decisions */
		max_freq = freq_table[0].frequency;
		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
			max_freq = max(max_freq, freq_table[i].frequency);
		clk_enable_dvfs(cpu_clk);

#if !defined(CONFIG_ARCH_RK3066B)
#if defined(CONFIG_ARCH_RK30)
		/* Limit gpu frequency between 133M to 400M */
		dvfs_clk_enable_limit(gpu_clk, 133000000, 400000000);

		freq_wq = create_singlethread_workqueue("rk30_cpufreqd");
#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
		if (rk30_cpufreq_is_ondemand_policy(policy)) {
			queue_delayed_work(freq_wq, &rk30_cpufreq_temp_limit_work, 0*HZ);
		/* NOTE(review): '&' of &notifier_policy_block was mangled to '¬' by a bad HTML-entity decode */
		cpufreq_register_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
#ifdef CPU_FREQ_DVFS_TST
		queue_delayed_work(freq_wq, &rk30_cpufreq_dvsf_tst_work, msecs_to_jiffies(20 * 1000));

	cpufreq_frequency_table_cpuinfo(policy, freq_table);

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	policy->cur = rk30_getspeed(0);

	policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; /* make ondemand default sampling_rate to 40000 */

	/*
	 * On rk30 SMP configuration, both processors share the voltage
	 * and clock. So both CPUs needs to be scaled together and hence
	 * needs software co-ordination. Use cpufreq affected_cpus
	 * interface to handle this scenario. Additional is_smp() check
	 * is to keep SMP_ON_UP build working.
	 */
	cpumask_setall(policy->cpus);
/*
 * cpufreq .exit hook — teardown mirrors rk30_cpu_init(): only cpu0
 * unregisters the policy notifier, stops the thermal worker and tears
 * down freq_wq.
 * NOTE(review): extraction dropped the early return, a NULL check on
 * freq_wq, closing braces and the final return; the '&' of
 * &notifier_policy_block was mangled to '¬' (HTML-entity decode bug).
 */
static int rk30_cpu_exit(struct cpufreq_policy *policy)
	if (policy->cpu != 0)

	cpufreq_frequency_table_cpuinfo(policy, freq_table);

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
	cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);

	cancel_delayed_work(&rk30_cpufreq_temp_limit_work);

	flush_workqueue(freq_wq);
	destroy_workqueue(freq_wq);
334 static struct freq_attr *rk30_cpufreq_attr[] = {
335 &cpufreq_freq_attr_scaling_available_freqs,
/**************************earlysuspend freeze cpu frequency******************************/
static struct early_suspend ff_early_suspend;

/* cpufreq sysfs nodes driven from kernel space on early suspend/resume */
#define FILE_GOV_MODE "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"
#define FILE_SETSPEED "/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed"
#define FILE_CUR_FREQ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"

#define FF_DEBUG(fmt, args...) printk(KERN_DEBUG "FREEZE FREQ DEBUG:\t"fmt, ##args)
#define FF_ERROR(fmt, args...) printk(KERN_ERR "FREEZE FREQ ERROR:\t"fmt, ##args)
/*
 * Read up to 32 bytes from a sysfs file into buf, trimming at the
 * first whitespace via sscanf.  Presumably returns 0 on success and
 * non-zero on open failure — TODO confirm; the error-path returns,
 * mm_segment_t (set_fs) juggling, local `offset` and closing braces
 * were lost in extraction.  Note sscanf(buf, "%s", buf) reads and
 * writes the same buffer — works for trimming but fragile.
 */
static int ff_read(char *file_path, char *buf)
	struct file *file = NULL;

	FF_DEBUG("read %s\n", file_path);
	file = filp_open(file_path, O_RDONLY, 0);
		/* open-failure branch (IS_ERR guard lost) */
		FF_ERROR("%s error open file %s\n", __func__, file_path);

	file->f_op->read(file, (char *)buf, 32, &offset);
	sscanf(buf, "%s", buf);

	filp_close(file, NULL);
/*
 * Write the NUL-terminated string buf into a sysfs file.  Presumably
 * returns 0 on success, non-zero on open failure — TODO confirm; the
 * error-path returns, set_fs juggling, local `offset` and closing
 * braces were lost in extraction.
 */
static int ff_write(char *file_path, char *buf)
	struct file *file = NULL;

	FF_DEBUG("write %s %s size = %d\n", file_path, buf, strlen(buf));
	file = filp_open(file_path, O_RDWR, 0);
		/* open-failure branch (IS_ERR guard lost) */
		FF_ERROR("%s error open file %s\n", __func__, file_path);

	file->f_op->write(file, (char *)buf, strlen(buf), &offset);

	filp_close(file, NULL);
/*
 * Force the named DVFS regulator to a fixed voltage (microvolts).
 * (Function name keeps the original "votlage" spelling — it is the
 * file-local identifier and callers are commented out below.)
 * NOTE(review): IS_ERR-style guards, local `ret` declaration, returns
 * and closing braces were lost in extraction.
 */
static void ff_scale_votlage(char *name, int volt)
	struct regulator* regulator;

	FF_DEBUG("enter %s\n", __func__);
	regulator = dvfs_get_regulator(name);
		/* lookup-failure branch (guard lost) */
		FF_ERROR("get regulator %s ERROR\n", name);

	ret = regulator_set_voltage(regulator, volt, volt);
		/* set-failure branch (guard lost) */
		FF_ERROR("set voltage error %s %d, ret = %d\n", name, volt, ret);
/* Forced-reparent helper provided by the RK30 clock driver. */
int clk_set_parent_force(struct clk *clk, struct clk *parent);

/*
 * Early-suspend hook: pin the CPU at a low frequency while the screen
 * is off — switch to the userspace governor, request 252 MHz through
 * sysfs, reparent the CPU clock onto the general PLL at 300 MHz with
 * DVFS disabled, and clamp the GPU to 75–133 MHz.
 * NOTE(review): extraction dropped the local `buf`, early returns on
 * ff_read/ff_write failure, braces, and the CONFIG_HOTPLUG_CPU body
 * after the trailing #ifdef.
 */
static void ff_early_suspend_func(struct early_suspend *h)
	FF_DEBUG("enter %s\n", __func__);
	if (ff_read(FILE_GOV_MODE, buf) != 0) {
		FF_ERROR("read current governor error\n");
	FF_DEBUG("current governor = %s\n", buf);

	/* hand control to userspace so a fixed setspeed sticks */
	strcpy(buf, "userspace");
	if (ff_write(FILE_GOV_MODE, buf) != 0) {
		FF_ERROR("set current governor error\n");

	strcpy(buf, "252000");
	if (ff_write(FILE_SETSPEED, buf) != 0) {
		FF_ERROR("set speed to 252MHz error\n");

	if (!IS_ERR(cpu_pll)&&!IS_ERR(cpu_gpll)&&!IS_ERR(cpu_clk))
		/* run the CPU from the general PLL while suspended */
		clk_set_parent_force(cpu_clk,cpu_gpll);
		clk_set_rate(cpu_clk,300*1000*1000);

		clk_disable_dvfs(cpu_clk);

	if (!IS_ERR(gpu_clk))
		dvfs_clk_enable_limit(gpu_clk,75*1000*1000,133*1000*1000);

	//ff_scale_votlage("vdd_cpu", 1000000);
	//ff_scale_votlage("vdd_core", 1000000);
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Early-resume hook: undo ff_early_suspend_func() — reparent the CPU
 * clock back onto the ARM PLL, re-enable DVFS, lift the GPU limit,
 * log the governor/frequency state, and restore a dynamic governor.
 * NOTE(review): local `buf`, early returns, braces and the
 * CONFIG_HOTPLUG_CPU body were lost in extraction.  Both trailing
 * writes set "interactive"; presumably one branch originally restored
 * a different governor — TODO confirm against the full source.
 */
static void ff_early_resume_func(struct early_suspend *h)
	FF_DEBUG("enter %s\n", __func__);

	if (!IS_ERR(cpu_pll)&&!IS_ERR(cpu_gpll)&&!IS_ERR(cpu_clk))
		/* back onto the ARM PLL, DVFS managed again */
		clk_set_parent_force(cpu_clk,cpu_pll);
		clk_set_rate(cpu_clk,300*1000*1000);
		clk_enable_dvfs(cpu_clk);

	if (!IS_ERR(gpu_clk))
		dvfs_clk_disable_limit(gpu_clk);
#ifdef CONFIG_HOTPLUG_CPU

	if (ff_read(FILE_GOV_MODE, buf) != 0) {
		FF_ERROR("read current governor error\n");
	FF_DEBUG("current governor = %s\n", buf);

	if (ff_read(FILE_CUR_FREQ, buf) != 0) {
		FF_ERROR("read current frequency error\n");
	FF_DEBUG("current frequency = %s\n", buf);

	strcpy(buf, "interactive");
	if (ff_write(FILE_GOV_MODE, buf) != 0) {
		FF_ERROR("set current governor error\n");

	strcpy(buf, "interactive");
	if (ff_write(FILE_GOV_MODE, buf) != 0) {
		FF_ERROR("set current governor error\n");
509 static int __init ff_init(void)
511 FF_DEBUG("enter %s\n", __func__);
512 #ifdef CONFIG_HAS_EARLYSUSPEND
513 ff_early_suspend.suspend = ff_early_suspend_func;
514 ff_early_suspend.resume = ff_early_resume_func;
515 ff_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 100;
517 register_early_suspend(&ff_early_suspend);
521 static void __exit ff_exit(void)
523 FF_DEBUG("enter %s\n", __func__);
524 unregister_early_suspend(&ff_early_suspend);
527 /**************************target freq******************************/
/*
 * Apply all frequency clamps to a requested target before programming:
 *  - boot-time clamp: requests >= 1.6 GHz are held to 1.416 GHz during
 *    early boot (is_booting window, checked against boot time);
 *  - thermal cap from temp_limt_freq (with single-step ramping above
 *    TEMP_LIMIT_FREQ for ondemand-style governors);
 *  - DVFS stress-test override (freq_dvfs_tst_rate, consumed once).
 * Returns the possibly-reduced target in kHz.
 * NOTE(review): extraction dropped many lines (is_booting reset,
 * braces, #endif lines, final return), so control flow shown here is
 * partial.
 */
static unsigned int cpufreq_scale_limt(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
	bool is_ondemand = rk30_cpufreq_is_ondemand_policy(policy);
	static bool is_booting = true;	/* cleared once the boot window ends */

#if !defined(CONFIG_ARCH_RK3066B) && !defined(CONFIG_ARCH_RK3188)
	if (is_booting && target_freq >= 1600 * 1000) {
		s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
		if (boottime_ms > 30 * MSEC_PER_SEC) {
			/* still inside the clamp window: hold at 1.416 GHz */
			target_freq = 1416 * 1000;

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
	/* remember the governor's own request across PRIVATE retargets */
	static unsigned int ondemand_target = TEMP_LIMIT_FREQ;
		target_freq = ondemand_target;
		ondemand_target = target_freq;

	/* ramp one table step at a time while above the thermal floor */
	if (target_freq != policy->max && target_freq > policy->cur && policy->cur >= TEMP_LIMIT_FREQ) {
		if (cpufreq_frequency_table_target(policy, freq_table, policy->cur + 1, CPUFREQ_RELATION_L, &i) == 0) {
			unsigned int f = freq_table[i].frequency;
			if (f < target_freq) {
	/*
	 * If the new frequency is more than the thermal max allowed
	 * frequency, go ahead and scale the mpu device to proper frequency.
	 */
	target_freq = min(target_freq, temp_limt_freq);

#ifdef CPU_FREQ_DVFS_TST
	if (freq_dvfs_tst_rate) {
		target_freq = freq_dvfs_tst_rate;
		freq_dvfs_tst_rate = 0;	/* one-shot override */
/*
 * DVFS set-rate callback for the cpu clock: wrap the actual rate
 * change in CPUFREQ_PRECHANGE/POSTCHANGE notifications for every
 * online CPU and refresh each CPU's loops_per_jiffy.  `rate` is in Hz;
 * cpufreq works in kHz, hence the /1000.
 * NOTE(review): declared static in the forward declaration above but
 * defined without static here; extraction also dropped locals (`ret`,
 * `i`), braces and the final return.
 */
int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate, dvfs_set_rate_callback set_rate)
	struct cpufreq_freqs freqs;

	freqs.new = rate / 1000;
	freqs.old = rk30_getspeed(0);

	for_each_online_cpu(freqs.cpu) {
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	FREQ_PRINTK_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
	ret = set_rate(clk, rate);

	/*
	 * Note that loops_per_jiffy is not updated on SMP systems in
	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
	 * on frequency transition. We need to update all dependent CPUs.
	 */
	for_each_possible_cpu(i) {
		per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;

	/* report the rate the hardware actually settled on */
	freqs.new = rk30_getspeed(0);

	for_each_online_cpu(freqs.cpu) {
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/*
 * cpufreq .target hook.  Under cpufreq_mutex: strip the private flags
 * out of `relation` (CPUFREQ_PRIVATE marks internal retargets;
 * ENABLE/DISABLE_FURTHER_CPUFREQ toggle no_cpufreq_access for
 * suspend/reboot), look the request up in freq_table, pass it through
 * cpufreq_scale_limt(), and program the clock — the DVFS callback
 * above handles the transition notifications.
 * NOTE(review): extraction dropped locals (`ret`, `is_private`), the
 * no-table early return, the no_cpufreq_access++/-- updates, goto/out
 * labels, braces and returns.
 */
static int rk30_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
	unsigned int i, new_rate = 0;
		FREQ_PRINTK_ERR("no freq table!\n");

	mutex_lock(&cpufreq_mutex);

	is_private = relation & CPUFREQ_PRIVATE;
	relation &= ~CPUFREQ_PRIVATE;

	if (relation & ENABLE_FURTHER_CPUFREQ)
	if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
		pr_err("denied access to %s as it is disabled temporarily\n", __func__);
	if (relation & DISABLE_FURTHER_CPUFREQ)
	relation &= ~MASK_FURTHER_CPUFREQ;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
		FREQ_PRINTK_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);

	new_rate = freq_table[i].frequency;
	if (!no_cpufreq_access)
		new_rate = cpufreq_scale_limt(new_rate, policy, is_private);

	FREQ_PRINTK_LOG("cpufreq req=%u,new=%u(was=%u)\n", target_freq, new_rate, rk30_getspeed(0));
	if (new_rate == rk30_getspeed(0))
	/* clk_set_rate() fans out to cpufreq_scale_rate_for_dvfs() via DVFS */
	ret = clk_set_rate(cpu_clk, new_rate * 1000);

	mutex_unlock(&cpufreq_mutex);
	FREQ_PRINTK_DBG("cpureq set rate (%u) end\n", new_rate);
/*
 * PM notifier: on PM_SUSPEND_PREPARE force suspend_freq and lock out
 * further changes (DISABLE_FURTHER_CPUFREQ); on resume/restore events
 * re-enable them.  Only applies under dynamic governors.
 * NOTE(review): extraction dropped the NULL-policy guard, the switch
 * statement itself, breaks, braces and the return of `ret`.
 */
static int rk30_cpufreq_pm_notifier_event(struct notifier_block *this,
					  unsigned long event, void *ptr)
	int ret = NOTIFY_DONE;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	if (!rk30_cpufreq_is_ondemand_policy(policy))

	case PM_SUSPEND_PREPARE:
		/* pin at suspend_freq and block all further requests */
		ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		/* unlock frequency scaling again */
		cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);

	cpufreq_cpu_put(policy);
696 static struct notifier_block rk30_cpufreq_pm_notifier = {
697 .notifier_call = rk30_cpufreq_pm_notifier_event,
/*
 * Reboot notifier: drop to suspend_freq and permanently disable
 * further frequency changes before the machine restarts, so reboot
 * happens at a known-safe operating point.
 * NOTE(review): the NULL-policy guard, braces and `return NOTIFY_OK;`
 * were lost in extraction.
 */
static int rk30_cpufreq_reboot_notifier_event(struct notifier_block *this,
					      unsigned long event, void *ptr)
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
	cpufreq_cpu_put(policy);
713 static struct notifier_block rk30_cpufreq_reboot_notifier = {
714 .notifier_call = rk30_cpufreq_reboot_notifier_event,
717 static struct cpufreq_driver rk30_cpufreq_driver = {
718 .flags = CPUFREQ_CONST_LOOPS,
719 .verify = rk30_verify_speed,
720 .target = rk30_target,
721 .get = rk30_getspeed,
722 .init = rk30_cpu_init,
723 .exit = rk30_cpu_exit,
725 .attr = rk30_cpufreq_attr,
728 static int __init rk30_cpufreq_init(void)
730 register_pm_notifier(&rk30_cpufreq_pm_notifier);
731 register_reboot_notifier(&rk30_cpufreq_reboot_notifier);
732 return cpufreq_register_driver(&rk30_cpufreq_driver);
735 static void __exit rk30_cpufreq_exit(void)
737 cpufreq_unregister_driver(&rk30_cpufreq_driver);
740 MODULE_DESCRIPTION("cpufreq driver for rock chip rk30");
741 MODULE_LICENSE("GPL");
742 device_initcall(rk30_cpufreq_init);
743 module_exit(rk30_cpufreq_exit);