rk29: cpufreq: always enable wq
[firefly-linux-kernel-4.4.55.git] arch/arm/mach-rk29/cpufreq.c
/* arch/arm/mach-rk29/cpufreq.c
 *
 * Copyright (C) 2010 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifdef CONFIG_CPU_FREQ_DEBUG
#define DEBUG
#endif
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <mach/cpufreq.h>
#include <../../../drivers/video/rk29_fb.h>

#define MHZ     (1000*1000)
#define KHZ     1000

static int no_cpufreq_access;

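/*
 * Frequency/voltage operating points.  In this table .frequency is the CPU
 * clock in kHz and .index carries the matching vcore voltage in uV (see
 * rk29_cpufreq_do_target(), which passes it to regulator_set_voltage()).
 */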
static struct cpufreq_frequency_table default_freq_table[] = {
//      { .index = 1100000, .frequency =   24000 },
//      { .index = 1200000, .frequency =  204000 },
//      { .index = 1200000, .frequency =  300000 },
        { .index = 1200000, .frequency =  408000 },
//      { .index = 1200000, .frequency =  600000 },
        { .index = 1200000, .frequency =  816000 }, /* must stay enabled: this is the LIMIT_AVG_FREQ / suspend frequency */
//      { .index = 1250000, .frequency = 1008000 },
//      { .index = 1300000, .frequency = 1104000 },
//      { .index = 1400000, .frequency = 1176000 },
//      { .index = 1400000, .frequency = 1200000 },
        { .frequency = CPUFREQ_TABLE_END },
};
static struct cpufreq_frequency_table *freq_table = default_freq_table;
static struct clk *arm_clk;
static struct clk *ddr_clk;
static DEFINE_MUTEX(mutex);

#ifdef CONFIG_REGULATOR
static struct regulator *vcore;
static int vcore_uV;
#define CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP
#endif

static struct workqueue_struct *wq;

#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP
static int limit = 1;
module_param(limit, int, 0644);

#define LIMIT_SECS      30
static int limit_secs = LIMIT_SECS;
module_param(limit_secs, int, 0644);

static int limit_temp;
module_param(limit_temp, int, 0444);

#define LIMIT_AVG_VOLTAGE       1225000 /* uV */
#else /* !CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP */
#define LIMIT_AVG_VOLTAGE       1400000 /* uV */
#endif /* CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP */

enum {
        DEBUG_CHANGE    = 1U << 0,
        DEBUG_TEMP      = 1U << 1,
        DEBUG_DISP      = 1U << 2,
};
static uint debug_mask = DEBUG_CHANGE;
module_param(debug_mask, uint, 0644);
#define dprintk(mask, fmt, ...) do { if (mask & debug_mask) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)

#define LIMIT_AVG_FREQ  (816 * KHZ) /* kHz */
static unsigned int limit_avg_freq = LIMIT_AVG_FREQ;
static int rk29_cpufreq_set_limit_avg_freq(const char *val, struct kernel_param *kp)
{
        int err = param_set_uint(val, kp);
        if (!err) {
                board_update_cpufreq_table(freq_table);
        }
        return err;
}
module_param_call(limit_avg_freq, rk29_cpufreq_set_limit_avg_freq, param_get_uint, &limit_avg_freq, 0644);

static int limit_avg_index = -1;

static unsigned int limit_avg_voltage = LIMIT_AVG_VOLTAGE;
static int rk29_cpufreq_set_limit_avg_voltage(const char *val, struct kernel_param *kp)
{
        int err = param_set_uint(val, kp);
        if (!err) {
                board_update_cpufreq_table(freq_table);
        }
        return err;
}
module_param_call(limit_avg_voltage, rk29_cpufreq_set_limit_avg_voltage, param_get_uint, &limit_avg_voltage, 0644);

#define CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
static bool limit_fb1_is_on;
static bool limit_hdmi_is_on;
static inline bool aclk_limit(void) { return limit_hdmi_is_on && limit_fb1_is_on; }
static int limit_index_816 = -1;
static int limit_index_1008 = -1;
module_param(limit_fb1_is_on, bool, 0644);
module_param(limit_hdmi_is_on, bool, 0644);
module_param(limit_index_816, int, 0444);
module_param(limit_index_1008, int, 0444);
#else
static inline bool aclk_limit(void) { return false; }
#endif

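/*
 * Treat governors whose name starts with 'o', 'i' or 'c' (presumably
 * ondemand, interactive and conservative) as load-based: the temperature
 * limit and the suspend/resume frequency clamp only apply to these.
 */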
static bool rk29_cpufreq_is_ondemand_policy(struct cpufreq_policy *policy)
{
        char c = 0;
        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c');
}

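/*
 * Walk the frequency table: round every entry through clk_round_rate(),
 * remember the highest frequency whose voltage (.index) does not exceed
 * limit_avg_voltage, and cache the best indices at or below 816 MHz and
 * 1008 MHz for the display limit.
 */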
static void board_do_update_cpufreq_table(struct cpufreq_frequency_table *table)
{
        unsigned int i;

        limit_avg_freq = 0;
        limit_avg_index = -1;
#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
        limit_index_816 = -1;
        limit_index_1008 = -1;
#endif

        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                table[i].frequency = clk_round_rate(arm_clk, table[i].frequency * KHZ) / KHZ;
                if (table[i].index <= limit_avg_voltage && limit_avg_freq < table[i].frequency) {
                        limit_avg_freq = table[i].frequency;
                        limit_avg_index = i;
                }
#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
                if (table[i].frequency <= 816 * KHZ &&
                    (limit_index_816 < 0 ||
                    (limit_index_816 >= 0 && table[limit_index_816].frequency < table[i].frequency)))
                        limit_index_816 = i;
                if (table[i].frequency <= 1008 * KHZ &&
                    (limit_index_1008 < 0 ||
                    (limit_index_1008 >= 0 && table[limit_index_1008].frequency < table[i].frequency)))
                        limit_index_1008 = i;
#endif
        }

        if (!limit_avg_freq)
                limit_avg_freq = LIMIT_AVG_FREQ;
}

int board_update_cpufreq_table(struct cpufreq_frequency_table *table)
{
        mutex_lock(&mutex);
        if (arm_clk) {
                board_do_update_cpufreq_table(table);
        }
        freq_table = table;
        mutex_unlock(&mutex);
        return 0;
}

static int rk29_cpufreq_verify(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, freq_table);
}

#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP
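/*
 * Crude software thermal estimate: every call adds c * elapsed_ms to
 * limit_temp (c grows with the current frequency and is negative while
 * running at or below limit_avg_freq) and subtracts the CPU idle time.
 * Once the accumulated value exceeds 325 * limit_secs * 1000, requests
 * above limit_avg_freq are clamped down to limit_avg_index.
 */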
static void rk29_cpufreq_limit_by_temp(struct cpufreq_policy *policy, unsigned int relation, int *index)
{
        int c, ms;
        ktime_t now;
        static ktime_t last = { .tv64 = 0 };
        cputime64_t wall;
        u64 idle_time_us;
        static u64 last_idle_time_us;
        unsigned int cur = policy->cur;

        if (!limit || !rk29_cpufreq_is_ondemand_policy(policy) ||
            (limit_avg_index < 0) || (relation & MASK_FURTHER_CPUFREQ)) {
                limit_temp = 0;
                last.tv64 = 0;
                return;
        }

        idle_time_us = get_cpu_idle_time_us(0, &wall);
        now = ktime_get();
        if (!last.tv64) {
                last = now;
                last_idle_time_us = idle_time_us;
                return;
        }

        limit_temp -= idle_time_us - last_idle_time_us; // -1000
        dprintk(DEBUG_TEMP, "idle %lld us (%lld - %lld)\n", idle_time_us - last_idle_time_us, idle_time_us, last_idle_time_us);
        last_idle_time_us = idle_time_us;

        ms = div_u64(ktime_us_delta(now, last), 1000);
        dprintk(DEBUG_TEMP, "%d kHz (%d uV) elapsed %d ms (%lld - %lld)\n", cur, vcore_uV, ms, now.tv64, last.tv64);
        last = now;

        if (cur <= 408 * 1000)
                c = -325;
        else if (cur <= 624 * 1000)
                c = -202;
        else if (cur <= limit_avg_freq)
                c = -78;
        else
                c = 325;
        limit_temp += c * ms;

        if (limit_temp < 0)
                limit_temp = 0;
        if (limit_temp > 325 * limit_secs * 1000 && freq_table[*index].frequency > limit_avg_freq)
                *index = limit_avg_index;
        dprintk(DEBUG_TEMP, "c %d temp %d (%s) index %d\n", c, limit_temp, limit_temp > 325 * limit_secs * 1000 ? "overheat" : "normal", *index);
}
#else
#define rk29_cpufreq_limit_by_temp(...) do {} while (0)
#endif

#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
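/*
 * With HDMI and the second framebuffer both active (aclk_limit()), cap the
 * CPU frequency: 816 MHz when the DDR clock runs below 492 MHz, 1008 MHz
 * otherwise.
 */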
static void rk29_cpufreq_limit_by_disp(int *index)
{
        unsigned long ddr_rate;
        unsigned int frequency = freq_table[*index].frequency;
        int new_index = -1;

        if (!aclk_limit())
                return;

        ddr_rate = clk_get_rate(ddr_clk);

        if (ddr_rate < 492 * MHZ) {
                if (limit_index_816 >= 0 && frequency > 816 * KHZ)
                        new_index = limit_index_816;
        } else {
                if (limit_index_1008 >= 0 && frequency > 1008 * KHZ)
                        new_index = limit_index_1008;
        }

        if (new_index != -1) {
                dprintk(DEBUG_DISP, "old %d new %d\n", freq_table[*index].frequency, freq_table[new_index].frequency);
                *index = new_index;
        }
}
#else
#define rk29_cpufreq_limit_by_disp(...) do {} while (0)
#endif

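/*
 * The DISABLE_FURTHER_CPUFREQ/ENABLE_FURTHER_CPUFREQ bits in the relation act
 * as a refcount (no_cpufreq_access): while it is non-zero, ordinary frequency
 * requests are rejected with -EINVAL.  This is used to pin the frequency
 * across suspend and reboot (see the PM and reboot notifiers below).
 */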
static int rk29_cpufreq_do_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
        int index;
        int new_vcore_uV;
        struct cpufreq_freqs freqs;
        const struct cpufreq_frequency_table *freq;
        int err = 0;
        bool force = relation & CPUFREQ_FORCE_CHANGE;

        relation &= ~CPUFREQ_FORCE_CHANGE;

        if ((relation & ENABLE_FURTHER_CPUFREQ) &&
            (relation & DISABLE_FURTHER_CPUFREQ)) {
                /* Invalidate both if both marked */
                relation &= ~ENABLE_FURTHER_CPUFREQ;
                relation &= ~DISABLE_FURTHER_CPUFREQ;
                pr_err("ENABLE_FURTHER_CPUFREQ and DISABLE_FURTHER_CPUFREQ both set, ignoring both\n");
        }
        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access--;
        if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
                pr_err("denied access to %s: cpufreq is temporarily disabled\n", __func__);
#endif
                return -EINVAL;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access++;

        if (cpufreq_frequency_table_target(policy, freq_table, target_freq, relation & ~MASK_FURTHER_CPUFREQ, &index)) {
                pr_err("invalid target_freq: %d\n", target_freq);
                return -EINVAL;
        }
        rk29_cpufreq_limit_by_disp(&index);
        rk29_cpufreq_limit_by_temp(policy, relation, &index);
        freq = &freq_table[index];

        if (policy->cur == freq->frequency && !force)
                return 0;

        freqs.old = policy->cur;
        freqs.new = freq->frequency;
        freqs.cpu = 0;
        new_vcore_uV = freq->index;
        dprintk(DEBUG_CHANGE, "%d kHz r %d(%c) selected %d kHz (%d uV)\n",
                target_freq, relation, relation & CPUFREQ_RELATION_H ? 'H' : 'L',
                freq->frequency, new_vcore_uV);

#ifdef CONFIG_REGULATOR
        /* raise vcore before switching to a higher frequency */
        if (vcore && freqs.new > freqs.old && vcore_uV != new_vcore_uV) {
                int err = regulator_set_voltage(vcore, new_vcore_uV, new_vcore_uV);
                if (err) {
                        pr_err("fail to set vcore (%d uV) for %d kHz: %d\n",
                                new_vcore_uV, freqs.new, err);
                        return err;
                } else {
                        vcore_uV = new_vcore_uV;
                }
        }
#endif

        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        dprintk(DEBUG_CHANGE, "pre change\n");
        clk_set_rate(arm_clk, freqs.new * KHZ + aclk_limit());
        dprintk(DEBUG_CHANGE, "post change\n");
        freqs.new = clk_get_rate(arm_clk) / KHZ;
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

#ifdef CONFIG_REGULATOR
        /* lower vcore after switching to a lower frequency */
        if (vcore && freqs.new < freqs.old && vcore_uV != new_vcore_uV) {
                int err = regulator_set_voltage(vcore, new_vcore_uV, new_vcore_uV);
                if (err) {
                        pr_err("fail to set vcore (%d uV) for %d kHz: %d\n",
                                new_vcore_uV, freqs.new, err);
                } else {
                        vcore_uV = new_vcore_uV;
                }
        }
#endif
        dprintk(DEBUG_CHANGE, "ok, got %d kHz\n", freqs.new);

        return err;
}

static int rk29_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
        int err;

        if (!policy || policy->cpu != 0)
                return -EINVAL;

        mutex_lock(&mutex);
        err = rk29_cpufreq_do_target(policy, target_freq, relation);
        mutex_unlock(&mutex);

        return err;
}

#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP
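/*
 * The temperature estimate is refreshed once per second (WORK_DELAY) by
 * re-targeting the current frequency; the work is queued whenever an
 * ondemand-like governor becomes active and cancelled otherwise.
 */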
static void rk29_cpufreq_limit_by_temp_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(rk29_cpufreq_limit_by_temp_work, rk29_cpufreq_limit_by_temp_work_func);
#define WORK_DELAY HZ

static int rk29_cpufreq_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
{
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        if (rk29_cpufreq_is_ondemand_policy(policy)) {
                dprintk(DEBUG_TEMP, "queue work\n");
                queue_delayed_work(wq, &rk29_cpufreq_limit_by_temp_work, WORK_DELAY);
        } else {
                dprintk(DEBUG_TEMP, "cancel work\n");
                cancel_delayed_work(&rk29_cpufreq_limit_by_temp_work);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = rk29_cpufreq_notifier_policy
};

static void rk29_cpufreq_limit_by_temp_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (policy) {
                cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
                cpufreq_cpu_put(policy);
        }
        queue_delayed_work(wq, &rk29_cpufreq_limit_by_temp_work, WORK_DELAY);
}
#endif

#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
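/*
 * Framebuffer events only record the HDMI/FB1 state and kick this work,
 * which forces a re-evaluation of the current frequency so the display
 * limit takes effect immediately.
 */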
static void rk29_cpufreq_limit_by_disp_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy;

        policy = cpufreq_cpu_get(0);
        if (policy) {
                cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_FORCE_CHANGE);
                cpufreq_cpu_put(policy);
        }
}

static DECLARE_WORK(rk29_cpufreq_limit_by_disp_work, rk29_cpufreq_limit_by_disp_work_func);

static int rk29_cpufreq_fb_notifier_event(struct notifier_block *this,
                unsigned long event, void *ptr)
{
        switch (event) {
        case RK29FB_EVENT_HDMI_ON:
                limit_hdmi_is_on = true;
                break;
        case RK29FB_EVENT_HDMI_OFF:
                limit_hdmi_is_on = false;
                break;
        case RK29FB_EVENT_FB1_ON:
                limit_fb1_is_on = true;
                break;
        case RK29FB_EVENT_FB1_OFF:
                limit_fb1_is_on = false;
                break;
        }

        dprintk(DEBUG_DISP, "event: %lu aclk_limit: %d\n", event, aclk_limit());
        flush_work(&rk29_cpufreq_limit_by_disp_work);
        queue_work(wq, &rk29_cpufreq_limit_by_disp_work);

        return NOTIFY_OK;
}

static struct notifier_block rk29_cpufreq_fb_notifier = {
        .notifier_call = rk29_cpufreq_fb_notifier_event,
};
#endif

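/*
 * Per-policy init: grab the arm_pll and ddr clocks (and the vcore regulator
 * if available), rebuild the frequency table against the real clock rates,
 * and create the single-threaded workqueue used by both limiters.  The
 * workqueue is created unconditionally, matching the commit summary above.
 */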
static int rk29_cpufreq_init(struct cpufreq_policy *policy)
{
        if (policy->cpu != 0)
                return -EINVAL;

        arm_clk = clk_get(NULL, "arm_pll");
        if (IS_ERR(arm_clk)) {
                int err = PTR_ERR(arm_clk);
                pr_err("fail to get arm_pll clk: %d\n", err);
                arm_clk = NULL;
                return err;
        }

        ddr_clk = clk_get(NULL, "ddr");
        if (IS_ERR(ddr_clk)) {
                int err = PTR_ERR(ddr_clk);
                pr_err("fail to get ddr clk: %d\n", err);
                ddr_clk = NULL;
                return err;
        }

#ifdef CONFIG_REGULATOR
        vcore = regulator_get(NULL, "vcore");
        if (IS_ERR(vcore)) {
                pr_err("fail to get regulator vcore: %ld\n", PTR_ERR(vcore));
                vcore = NULL;
        }
#endif

        board_update_cpufreq_table(freq_table); /* force an update of the frequency table */
        BUG_ON(cpufreq_frequency_table_cpuinfo(policy, freq_table));
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
        policy->cur = clk_get_rate(arm_clk) / KHZ;

        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; /* makes ondemand's default sampling_rate 40000 */

        wq = create_singlethread_workqueue("rk29_cpufreqd");
        if (!wq) {
                pr_err("fail to create workqueue\n");
                return -ENOMEM;
        }

#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP
        if (rk29_cpufreq_is_ondemand_policy(policy)) {
                dprintk(DEBUG_TEMP, "start wq\n");
                queue_delayed_work(wq, &rk29_cpufreq_limit_by_temp_work, WORK_DELAY);
        }
        cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
#endif
#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
        rk29fb_register_notifier(&rk29_cpufreq_fb_notifier);
#endif
        return 0;
}

static int rk29_cpufreq_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_DISP
        rk29fb_unregister_notifier(&rk29_cpufreq_fb_notifier);
#endif
#ifdef CONFIG_RK29_CPU_FREQ_LIMIT_BY_TEMP
        cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
        if (wq)
                cancel_delayed_work(&rk29_cpufreq_limit_by_temp_work);
#endif
        if (wq) {
                flush_workqueue(wq);
                destroy_workqueue(wq);
                wq = NULL;
        }
#ifdef CONFIG_REGULATOR
        if (vcore)
                regulator_put(vcore);
#endif
        clk_put(ddr_clk);
        clk_put(arm_clk);
        return 0;
}

static struct freq_attr *rk29_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver rk29_cpufreq_driver = {
        .flags          = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
        .init           = rk29_cpufreq_init,
        .exit           = rk29_cpufreq_exit,
        .verify         = rk29_cpufreq_verify,
        .target         = rk29_cpufreq_target,
        .name           = "rk29",
        .attr           = rk29_cpufreq_attr,
};

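/*
 * On suspend, lock the frequency to limit_avg_freq and block further changes
 * (DISABLE_FURTHER_CPUFREQ); on resume the lock is dropped again.  The reboot
 * notifier below applies the same clamp before the machine goes down.
 */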
static int rk29_cpufreq_pm_notifier_event(struct notifier_block *this,
                unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (!policy)
                return ret;

        if (!rk29_cpufreq_is_ondemand_policy(policy))
                goto out;

        switch (event) {
        case PM_SUSPEND_PREPARE:
                ret = cpufreq_driver_target(policy, limit_avg_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_L);
                if (ret < 0) {
                        ret = NOTIFY_BAD;
                        goto out;
                }
                ret = NOTIFY_OK;
                break;
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                cpufreq_driver_target(policy, limit_avg_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_L);
                ret = NOTIFY_OK;
                break;
        }
out:
        cpufreq_cpu_put(policy);
        return ret;
}

static struct notifier_block rk29_cpufreq_pm_notifier = {
        .notifier_call = rk29_cpufreq_pm_notifier_event,
};

static int rk29_cpufreq_reboot_notifier_event(struct notifier_block *this,
                unsigned long event, void *ptr)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (policy) {
                cpufreq_driver_target(policy, limit_avg_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_L);
                cpufreq_cpu_put(policy);
        }

        return NOTIFY_OK;
}

static struct notifier_block rk29_cpufreq_reboot_notifier = {
        .notifier_call = rk29_cpufreq_reboot_notifier_event,
};

static int __init rk29_cpufreq_register(void)
{
        register_pm_notifier(&rk29_cpufreq_pm_notifier);
        register_reboot_notifier(&rk29_cpufreq_reboot_notifier);

        return cpufreq_register_driver(&rk29_cpufreq_driver);
}

device_initcall(rk29_cpufreq_register);