rk3288: raise vdd_logic to 1.3V, add 600M support to GPU DVFS table
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / rockchip-cpufreq.c
/*
 * Copyright (C) 2013 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>

#define VERSION "1.0"

#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)

/* Fallback table: .frequency in kHz, .index carries the OPP voltage in uV */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 312 * 1000,       .index = 875 * 1000},
        {.frequency = 504 * 1000,       .index = 925 * 1000},
        {.frequency = 816 * 1000,       .index = 975 * 1000},
        {.frequency = 1008 * 1000,      .index = 1075 * 1000},
        {.frequency = 1200 * 1000,      .index = 1150 * 1000},
        {.frequency = 1416 * 1000,      .index = 1250 * 1000},
        {.frequency = 1608 * 1000,      .index = 1350 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};
static struct cpufreq_frequency_table *freq_table = default_freq_table;
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
static int no_cpufreq_access;
static unsigned int suspend_freq = 816 * 1000;
static unsigned int suspend_volt = 1000000; // 1V
static unsigned int low_battery_freq = 600 * 1000;
static unsigned int low_battery_capacity = 5; // 5%
static bool is_booting = true;
static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
struct dvfs_node *clk_cpu_dvfs_node = NULL;
struct dvfs_node *clk_gpu_dvfs_node = NULL;
struct dvfs_node *clk_vepu_dvfs_node = NULL;
/*******************************************************/
static unsigned int cpufreq_get_rate(unsigned int cpu)
{
        if (clk_cpu_dvfs_node)
                return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

        return 0;
}

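/*
 * Match "load based" governors by the first letter of their name
 * (presumably ondemand, interactive, conservative and hotplug); only
 * these governors get the thermal-limit and boot-time frequency capping.
 */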
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
{
        char c = 0;

        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
}

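/* Return the highest table frequency at or below max_freq, or max_freq itself if none qualifies. */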
static unsigned int get_freq_from_table(unsigned int max_freq)
{
        unsigned int i;
        unsigned int target_freq = 0;

        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = freq_table[i].frequency;

                if (freq <= max_freq && target_freq < freq)
                        target_freq = freq;
        }
        if (!target_freq)
                target_freq = max_freq;
        return target_freq;
}

/**********************thermal limit**************************/
//#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static unsigned int temp_limit_freq = -1;       /* (unsigned)-1 means no thermal limit */
module_param(temp_limit_freq, uint, 0444);

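/*
 * Per-load throttle tables: .index holds the temperature threshold in
 * degrees C and .frequency the cap in kHz. Every entry (including the
 * (unsigned)-1 placeholders) is clamped to the nearest supported
 * frequency by cpufreq_temp_limit_init().
 */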
static struct cpufreq_frequency_table temp_limits[4][4] = {
        {       // 1 CPU busy
                {.frequency =          -1, .index = 50},
                {.frequency =          -1, .index = 55},
                {.frequency =          -1, .index = 60},
                {.frequency = 1608 * 1000, .index = 75},
        }, {    // 2 CPUs busy
                {.frequency = 1800 * 1000, .index = 50},
                {.frequency = 1608 * 1000, .index = 55},
                {.frequency = 1416 * 1000, .index = 60},
                {.frequency = 1200 * 1000, .index = 75},
        }, {    // 3 CPUs busy
                {.frequency = 1608 * 1000, .index = 50},
                {.frequency = 1416 * 1000, .index = 55},
                {.frequency = 1200 * 1000, .index = 60},
                {.frequency = 1008 * 1000, .index = 75},
        }, {    // 4 CPUs busy
                {.frequency = 1416 * 1000, .index = 50},
                {.frequency = 1200 * 1000, .index = 55},
                {.frequency = 1008 * 1000, .index = 60},
                {.frequency =  816 * 1000, .index = 75},
        }
};

static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
        {.frequency = 1008 * 1000, .index = 100},
};

static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
        {.frequency = 1008 * 1000, .index = 0},
};

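/* No thermal sensor is wired up here; report a fixed 60C. */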
static int get_temp(void)
{
        return 60;
}

static char sys_state;
static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
        char state;

        if (count < 1)
                return count;
        if (copy_from_user(&state, buffer, 1))
                return -EFAULT;

        sys_state = state;
        return count;
}

static const struct file_operations sys_state_fops = {
        .owner  = THIS_MODULE,
        .write  = sys_state_write,
};

static struct miscdevice sys_state_dev = {
        .fops   = &sys_state_fops,
        .name   = "sys_state",
        .minor  = MISC_DYNAMIC_MINOR,
};

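/*
 * Periodic worker (it requeues itself at the tail): pick a throttle table
 * from the sys_state misc-device performance hint, or from the CPU busy
 * count derived from idle time, then update temp_limit_freq and kick
 * cpufreq with CPUFREQ_PRIVATE when the current rate exceeds the new cap.
 */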
static void cpufreq_temp_limit_work_func(struct work_struct *work)
{
        static bool in_perf = false;
        struct cpufreq_policy *policy;
        int temp, i;
        unsigned int new_freq = -1;
        unsigned long delay = HZ / 10; // 100ms
        unsigned int nr_cpus = num_online_cpus();
        const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
        size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);

        temp = get_temp();

        if (sys_state == '1') {
                in_perf = true;
                if (gpu_is_mali400) {
                        unsigned int gpu_irqs[2];
                        gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
                        msleep(40);
                        gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
                        delay = 0;
                        if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
                                limits_table = temp_limits_cpu_perf;
                                limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
                        } else {
                                limits_table = temp_limits_gpu_perf;
                                limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
                        }
                } else {
                        delay = HZ; // 1s
                        limits_table = temp_limits_cpu_perf;
                        limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
                }
        } else if (in_perf) {
                in_perf = false;
        } else {
                static u64 last_time_in_idle = 0;
                static u64 last_time_in_idle_timestamp = 0;
                u64 time_in_idle = 0, now;
                u32 delta_idle;
                u32 delta_time;
                unsigned cpu;

                for_each_online_cpu(cpu) {
                        time_in_idle += get_cpu_idle_time_us(cpu, &now);
                }
                delta_time = now - last_time_in_idle_timestamp;
                delta_idle = time_in_idle - last_time_in_idle;
                last_time_in_idle = time_in_idle;
                last_time_in_idle_timestamp = now;
                delta_idle += delta_time >> 4; // +6.25%
                if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
                        limits_table = temp_limits[0];
                else if (delta_idle > (nr_cpus - 2) * delta_time)
                        limits_table = temp_limits[1];
                else if (delta_idle > (nr_cpus - 3) * delta_time)
                        limits_table = temp_limits[2];
                FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (int)((limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0])));
        }

        for (i = 0; i < limits_size; i++) {
                if (temp >= limits_table[i].index)
                        new_freq = limits_table[i].frequency;
        }

        if (temp_limit_freq != new_freq) {
                unsigned int cur_freq;
                temp_limit_freq = new_freq;
                cur_freq = cpufreq_get_rate(0);
                FREQ_DBG("temp limit %7u kHz cur %7u kHz\n", temp_limit_freq, cur_freq);
                if (cur_freq > temp_limit_freq) {
                        policy = cpufreq_cpu_get(0);
                        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
                        cpufreq_cpu_put(policy);
                }
        }

        queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);
}

static DECLARE_DELAYED_WORK(cpufreq_temp_limit_work, cpufreq_temp_limit_work_func);

static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        if (cpufreq_is_ondemand(policy)) {
                FREQ_DBG("queue work\n");
                queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
        } else {
                FREQ_DBG("cancel work\n");
                cancel_delayed_work_sync(&cpufreq_temp_limit_work);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_notifier_policy
};

static void cpufreq_temp_limit_init(struct cpufreq_policy *policy)
{
        unsigned int i;
        struct cpufreq_frequency_table *table;

        /* temp_limits is walked as a flat array of 4x4 entries */
        table = temp_limits[0];
        for (i = 0; i < ARRAY_SIZE(temp_limits) * ARRAY_SIZE(temp_limits[0]); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        table = temp_limits_cpu_perf;
        for (i = 0; i < ARRAY_SIZE(temp_limits_cpu_perf); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        table = temp_limits_gpu_perf;
        for (i = 0; i < ARRAY_SIZE(temp_limits_gpu_perf); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);

        misc_register(&sys_state_dev);
        if (cpufreq_is_ondemand(policy))
                queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
        cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
}

static void cpufreq_temp_limit_exit(void)
{
        cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
        if (freq_wq)
                cancel_delayed_work(&cpufreq_temp_limit_work);
}
#else
static inline void cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
static inline void cpufreq_temp_limit_exit(void) {}
#endif

static int cpufreq_verify(struct cpufreq_policy *policy)
{
        if (!freq_table)
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table);
}

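/*
 * set_rate callback registered with the DVFS layer: wraps clk_set_rate()
 * in cpufreq PRE/POSTCHANGE notifications so governors and the per-CPU
 * loops_per_jiffy stay consistent when the rate is changed on cpufreq's
 * behalf by the DVFS code.
 */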
static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
{
        unsigned int i;
        int ret;
        struct cpufreq_freqs freqs;
        struct cpufreq_policy *policy;

        freqs.new = rate / 1000;
        freqs.old = clk_get_rate(clk) / 1000;

        for_each_online_cpu(freqs.cpu) {
                policy = cpufreq_cpu_get(freqs.cpu);
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
                cpufreq_cpu_put(policy);
        }

        FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

        ret = clk_set_rate(clk, rate);

#ifdef CONFIG_SMP
        /*
         * Note that loops_per_jiffy is not updated on SMP systems in
         * cpufreq driver. So, update the per-CPU loops_per_jiffy value
         * on frequency transition. We need to update all dependent CPUs.
         */
        for_each_possible_cpu(i) {
                per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
        }
#endif

        freqs.new = clk_get_rate(clk) / 1000;
        /* notifiers */
        for_each_online_cpu(freqs.cpu) {
                policy = cpufreq_cpu_get(freqs.cpu);
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
                cpufreq_cpu_put(policy);
        }

        return ret;
}

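/*
 * One-time CPU0 setup: grab the gpu/vepu/core DVFS nodes, hook the
 * set_rate callback, adopt the board's frequency/voltage table (falling
 * back to default_freq_table) and derive the suspend frequency from
 * suspend_volt.
 */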
static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
{
        unsigned int i;

        gpu_is_mali400 = cpu_is_rk3188();

        clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
        if (clk_gpu_dvfs_node) {
                clk_enable_dvfs(clk_gpu_dvfs_node);
                if (gpu_is_mali400)
                        dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);
        }

        clk_vepu_dvfs_node = clk_get_dvfs_node("clk_vepu");
        if (clk_vepu_dvfs_node) {
                clk_enable_dvfs(clk_vepu_dvfs_node);
                dvfs_clk_set_rate(clk_vepu_dvfs_node, 198000000);
        }

        clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
        if (!clk_cpu_dvfs_node)
                return -EINVAL;

        dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
        freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
        if (freq_table == NULL) {
                freq_table = default_freq_table;
        } else {
                int v = INT_MAX;

                /* suspend at the lowest-voltage OPP whose voltage is still >= suspend_volt */
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                        if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
                                suspend_freq = freq_table[i].frequency;
                                v = freq_table[i].index;
                        }
                }
        }
        low_battery_freq = get_freq_from_table(low_battery_freq);
        clk_enable_dvfs(clk_cpu_dvfs_node);

        freq_wq = alloc_workqueue("cpufreq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
        cpufreq_temp_limit_init(policy);

        pr_info("version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
        return 0;
}

static int cpufreq_init(struct cpufreq_policy *policy)
{
        static int cpu0_err;

        if (policy->cpu == 0)
                cpu0_err = cpufreq_init_cpu0(policy);

        if (cpu0_err)
                return cpu0_err;

        /* set cpuinfo min/max frequency from the table */
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        /* expose the table via the scaling_available_frequencies sysfs node */
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

        policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;        /* makes ondemand's default sampling_rate 40000 */

        /*
         * On SMP configurations both processors share the voltage and
         * clock, so both CPUs need to be scaled together and hence need
         * software co-ordination. Use the cpufreq affected_cpus
         * interface to handle this scenario. The additional is_smp()
         * check is to keep the SMP_ON_UP build working.
         */
        if (is_smp())
                cpumask_setall(policy->cpus);

        return 0;
}

static int cpufreq_exit(struct cpufreq_policy *policy)
{
        if (policy->cpu != 0)
                return 0;

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_put_dvfs_node(clk_cpu_dvfs_node);
        cpufreq_temp_limit_exit();
        if (freq_wq) {
                flush_workqueue(freq_wq);
                destroy_workqueue(freq_wq);
                freq_wq = NULL;
        }

        return 0;
}

static struct freq_attr *cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

//#ifdef CONFIG_POWER_SUPPLY
#if 0
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void) { return 100; }
#endif

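/*
 * Under a load-based governor: cap the frequency at low_battery_freq while
 * booting on a nearly empty battery, remember/replay the governor's own
 * request around thermal overrides, and finally clamp to temp_limit_freq.
 */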
static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
{
        bool is_ondemand = cpufreq_is_ondemand(policy);

        if (!is_ondemand)
                return target_freq;

        if (is_booting) {
                s64 boottime_ms = ktime_to_ms(ktime_get_boottime());

                if (boottime_ms > 60 * MSEC_PER_SEC) {
                        is_booting = false;
                } else if (target_freq > low_battery_freq &&
                           rk_get_system_battery_capacity() <= low_battery_capacity) {
                        target_freq = low_battery_freq;
                }
        }

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        {
                static unsigned int ondemand_target = 816 * 1000;

                if (is_private)
                        target_freq = ondemand_target;
                else
                        ondemand_target = target_freq;
        }

        /*
         * Never exceed the thermal maximum: clamp the requested
         * frequency to temp_limit_freq.
         */
        target_freq = min(target_freq, temp_limit_freq);
#endif

        return target_freq;
}

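/*
 * .target hook. The upper "relation" bits carry the driver-private
 * DISABLE/ENABLE_FURTHER_CPUFREQ and CPUFREQ_PRIVATE flags; while
 * no_cpufreq_access is non-zero (e.g. across suspend) all other
 * requests are rejected.
 */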
static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
        unsigned int i, new_freq = target_freq, new_rate, cur_rate;
        int ret = 0;
        bool is_private;

        if (!freq_table) {
                FREQ_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access--;
        if (no_cpufreq_access) {
                FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
                ret = -EINVAL;
                goto out;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
        if (ret) {
                FREQ_ERR("no freq match for %u (ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_freq = freq_table[i].frequency;
        if (!no_cpufreq_access)
                new_freq = cpufreq_scale_limit(new_freq, policy, is_private);

        new_rate = new_freq * 1000;
        cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
        FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
        if (new_rate == cur_rate)
                goto out;
        ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);

out:
        FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
        mutex_unlock(&cpufreq_mutex);
        return ret;
}

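/*
 * PM notifier: pin the CPU at suspend_freq and block further cpufreq
 * requests before suspend; lift the block again after resume/restore.
 */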
static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (!policy)
                return ret;

        if (!cpufreq_is_ondemand(policy))
                goto out;

        switch (event) {
        case PM_SUSPEND_PREPARE:
                ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                if (ret < 0) {
                        ret = NOTIFY_BAD;
                        goto out;
                }
                ret = NOTIFY_OK;
                break;
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                ret = NOTIFY_OK;
                break;
        }
out:
        cpufreq_cpu_put(policy);
        return ret;
}

static struct notifier_block cpufreq_pm_notifier = {
        .notifier_call = cpufreq_pm_notifier_event,
};

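/*
 * Reboot notifier: drop to suspend_freq and disable further scaling so
 * the frequency/voltage stay at a safe level while the system goes down.
 */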
static int cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (policy) {
                is_booting = false;
                cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                cpufreq_cpu_put(policy);
        }

        return NOTIFY_OK;
}

static struct notifier_block cpufreq_reboot_notifier = {
        .notifier_call = cpufreq_reboot_notifier_event,
};

static struct cpufreq_driver cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cpufreq_verify,
        .target = cpufreq_target,
        .get = cpufreq_get_rate,
        .init = cpufreq_init,
        .exit = cpufreq_exit,
        .name = "rockchip",
        .attr = cpufreq_attr,
};

static int __init cpufreq_driver_init(void)
{
        register_pm_notifier(&cpufreq_pm_notifier);
        register_reboot_notifier(&cpufreq_reboot_notifier);
        return cpufreq_register_driver(&cpufreq_driver);
}

device_initcall(cpufreq_driver_init);