/* arch/arm/mach-rk30/rk30_dvfs.c
 *
 * Copyright (C) 2012 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16 #include <linux/kernel.h>
\r
17 #include <linux/err.h>
\r
18 #include <linux/spinlock.h>
\r
19 #include <linux/list.h>
\r
20 #include <linux/slab.h>
\r
21 #include <linux/clk.h>
\r
22 #include <linux/cpufreq.h>
\r
23 #include <mach/dvfs.h>
\r
24 #include <mach/clock.h>
\r
25 #include <linux/regulator/consumer.h>
\r
26 #include <linux/delay.h>
\r
27 #include <linux/io.h>
\r
28 #include <linux/hrtimer.h>
\r
30 #include <mach/io.h>
\r
31 #include <mach/cru.h>
\r
32 #include <mach/grf-rk3066b.h>
\r
34 #define MHZ (1000 * 1000)
\r
36 #define CLK_LOOPS_JIFFY_REF 11996091ULL
\r
37 #define CLK_LOOPS_RATE_REF (1200) //Mhz
\r
38 #define CLK_LOOPS_RECALC(new_rate) div_u64(CLK_LOOPS_JIFFY_REF*(new_rate),CLK_LOOPS_RATE_REF*MHZ)
\r
39 static struct clk *clk_cpu = NULL, *clk_cpu_div = NULL, *arm_pll_clk = NULL, *general_pll_clk = NULL;
\r
40 static unsigned long lpj_24m;
\r
/*
 * Precomputed delay for one (ARM volt, LOG volt) pair: how much slower
 * the ARM domain's gates are than the LOG domain's, in zoomed ns
 * (see VD_DELAY_ZOOM); 0 when ARM is at least as fast.
 */
struct gate_delay_table {
        unsigned long arm_perf;        /* ARM-domain gate performance */
        unsigned long log_perf;        /* LOG-domain gate performance */
        unsigned long delay;           /* arm delay minus log delay, or 0 */
};
\r
/* One supported CPU rate and its clock-cycle length (zoomed ns). */
struct cycle_by_rate {
        unsigned long rate_khz;        /* CPU rate in kHz */
        unsigned long cycle_ns;        /* cycle time, magnified by VD_DELAY_ZOOM */
};
\r
/*
 * Per-LOG-voltage delays introduced by the two non-zero UOC settings.
 * NOTE(review): the volt/perf members were lost in extraction; they are
 * reconstructed from their use in dvfs_uoc_val_delay_init() — confirm.
 */
struct uoc_val_xx2delay {
        unsigned long volt;            /* LOG voltage, uV */
        unsigned long perf;            /* gate performance at this voltage */
        unsigned long uoc_val_01;      /* delay when UOC bits = 2'b01 */
        unsigned long uoc_val_11;      /* delay when UOC bits = 2'b11 */
};
\r
/*
 * Voltage -> gate-performance mapping entry (see dvfs_vp_table).
 * NOTE(review): the volt member was lost in extraction; reconstructed
 * from the designated initializers in dvfs_vp_table — confirm.
 */
struct dvfs_volt_performance {
        unsigned long volt;            /* voltage in uV; VP_TABLE_END terminates */
        unsigned long perf;            /* Gate performance */
};
\r
64 static int rk_dvfs_clk_notifier_event(struct notifier_block *this,
\r
65 unsigned long event, void *ptr)
\r
67 struct clk_notifier_data *noti_info;
\r
69 struct clk_node *dvfs_clk;
\r
70 noti_info = (struct clk_notifier_data *)ptr;
\r
71 clk = noti_info->clk;
\r
72 dvfs_clk = clk->dvfs_info;
\r
75 case CLK_PRE_RATE_CHANGE:
\r
76 DVFS_DBG("%s CLK_PRE_RATE_CHANGE\n", __func__);
\r
78 case CLK_POST_RATE_CHANGE:
\r
79 DVFS_DBG("%s CLK_POST_RATE_CHANGE\n", __func__);
\r
81 case CLK_ABORT_RATE_CHANGE:
\r
82 DVFS_DBG("%s CLK_ABORT_RATE_CHANGE\n", __func__);
\r
84 case CLK_PRE_ENABLE:
\r
85 DVFS_DBG("%s CLK_PRE_ENABLE\n", __func__);
\r
87 case CLK_POST_ENABLE:
\r
88 DVFS_DBG("%s CLK_POST_ENABLE\n", __func__);
\r
90 case CLK_ABORT_ENABLE:
\r
91 DVFS_DBG("%s CLK_ABORT_ENABLE\n", __func__);
\r
93 case CLK_PRE_DISABLE:
\r
94 DVFS_DBG("%s CLK_PRE_DISABLE\n", __func__);
\r
96 case CLK_POST_DISABLE:
\r
97 DVFS_DBG("%s CLK_POST_DISABLE\n", __func__);
\r
98 dvfs_clk->set_freq = 0;
\r
100 case CLK_ABORT_DISABLE:
\r
101 DVFS_DBG("%s CLK_ABORT_DISABLE\n", __func__);
\r
110 static struct notifier_block rk_dvfs_clk_notifier = {
\r
111 .notifier_call = rk_dvfs_clk_notifier_event,
\r
114 static unsigned long dvfs_volt_arm_support_table[] = {
\r
135 static unsigned long dvfs_volt_log_support_table[] = {
\r
/*
 * volt   dly_line   delta per +0.1V   ratio per +0.1V   ratio vs. 1.0V
 * 1.10   157        29                1.23              1.23
 * 1.20   184        27                1.17              1.44
 * 1.30   209        25                1.14              1.63
 * 1.40   231        22                1.11              1.80
 * 1.50   251        20                1.09              1.96
 *
 * This table is calculated from the function:
 *     dly_line = 536 * volt - 116 * volt * volt - 292
 *
 * dly_line unit: Gate
 *
 * The table's reference voltage is 1.0V, where delay_line = 128 (Gates).
 */
\r
174 #define VP_TABLE_END (~0)
\r
175 static struct dvfs_volt_performance dvfs_vp_table[] = {
\r
176 {.volt = 850 * 1000, .perf = 350}, //623
\r
177 {.volt = 875 * 1000, .perf = 350}, //689
\r
178 {.volt = 900 * 1000, .perf = 350}, //753 make low arm freq uoc as small as posible
\r
179 {.volt = 925 * 1000, .perf = 450}, //817
\r
180 {.volt = 950 * 1000, .perf = 550}, //879
\r
181 {.volt = 975 * 1000, .perf = 650}, //940
\r
182 {.volt = 1000 * 1000, .perf = 750},
\r
183 {.volt = 1025 * 1000, .perf = 1100},
\r
184 {.volt = 1050 * 1000, .perf = 1125},
\r
185 {.volt = 1075 * 1000, .perf = 1173},
\r
186 {.volt = 1100 * 1000, .perf = 1230},
\r
187 {.volt = 1125 * 1000, .perf = 1283},
\r
188 {.volt = 1150 * 1000, .perf = 1336},
\r
189 {.volt = 1175 * 1000, .perf = 1388},
\r
190 {.volt = 1200 * 1000, .perf = 1440},
\r
191 {.volt = 1225 * 1000, .perf = 1620}, //1489
\r
192 {.volt = 1250 * 1000, .perf = 1660}, //1537
\r
193 {.volt = 1275 * 1000, .perf = 1700}, //1585
\r
194 {.volt = 1300 * 1000, .perf = 1720}, //1630 1.6Garm 600Mgpu, make uoc=2b'01
\r
195 {.volt = 1325 * 1000, .perf = 1740}, //1676
\r
196 {.volt = 1350 * 1000, .perf = 1760}, //1720
\r
197 {.volt = 1375 * 1000, .perf = 1780}, //1763
\r
198 {.volt = 1400 * 1000, .perf = 1800},
\r
199 {.volt = 1425 * 1000, .perf = 1846},
\r
200 {.volt = 1450 * 1000, .perf = 1885},
\r
201 {.volt = 1475 * 1000, .perf = 1924},
\r
202 {.volt = 1500 * 1000, .perf = 1960},
\r
203 {.volt = VP_TABLE_END},
\r
205 //>1.2V step = 50mV
\r
206 //ns (Magnified 10^6 times)
\r
207 #define VD_DELAY_ZOOM (1000UL * 1000UL)
\r
208 #define VD_ARM_DELAY 1350000UL
\r
209 #define VD_LOG_DELAY 877500UL
\r
211 #define L2_HOLD 40UL //located at 40%
\r
212 #define L2_SETUP 70UL //located at 70%
\r
214 #define UOC_VAL_00 0UL
\r
215 #define UOC_VAL_01 165000UL //0.9V(125`C):220000
\r
216 #define UOC_VAL_11 285000UL //0.9V(125`C):380000
\r
217 #define UOC_VAL_MIN 100UL //to work around get_delay=0
\r
219 #define SIZE_SUPPORT_ARM_VOLT ARRAY_SIZE(dvfs_volt_arm_support_table)
\r
220 #define SIZE_SUPPORT_LOG_VOLT ARRAY_SIZE(dvfs_volt_log_support_table)
\r
221 #define SIZE_VP_TABLE ARRAY_SIZE(dvfs_vp_table)
\r
222 #define SIZE_ARM_FREQ_TABLE 10
\r
223 static struct cycle_by_rate rate_cycle[SIZE_ARM_FREQ_TABLE];
\r
224 static int size_dvfs_arm_table = 0;
\r
226 static struct clk_node *dvfs_clk_cpu;
\r
227 static struct vd_node vd_core;
\r
228 static struct vd_node vd_cpu;
\r
230 static struct uoc_val_xx2delay uoc_val_xx[SIZE_VP_TABLE];
\r
231 static struct gate_delay_table gate_delay[SIZE_VP_TABLE][SIZE_VP_TABLE];
\r
233 static unsigned long dvfs_get_perf_byvolt(unsigned long volt)
\r
236 for (i = 0; dvfs_vp_table[i].volt != VP_TABLE_END; i++) {
\r
237 if (volt <= dvfs_vp_table[i].volt)
\r
238 return dvfs_vp_table[i].perf;
\r
243 static unsigned long dvfs_get_gate_delay_per_volt(unsigned long arm_perf, unsigned long log_perf)
\r
245 unsigned long gate_arm_delay, gate_log_delay;
\r
250 gate_arm_delay = VD_ARM_DELAY * 1000 / arm_perf;
\r
251 gate_log_delay = VD_LOG_DELAY * 1000 / log_perf;
\r
253 return (gate_arm_delay > gate_log_delay ? (gate_arm_delay - gate_log_delay) : 0);
\r
256 static int dvfs_gate_delay_init(void)
\r
260 for (i = 0; i < SIZE_VP_TABLE - 1; i++)
\r
261 for (j = 0; j < SIZE_VP_TABLE - 1; j++) {
\r
262 gate_delay[i][j].arm_perf = dvfs_vp_table[i].perf;
\r
263 gate_delay[i][j].log_perf = dvfs_vp_table[j].perf;
\r
264 gate_delay[i][j].delay = dvfs_get_gate_delay_per_volt(gate_delay[i][j].arm_perf,
\r
265 gate_delay[i][j].log_perf);
\r
267 //DVFS_DBG("%s: arm_perf=%lu, log_perf=%lu, delay=%lu\n", __func__,
\r
268 // gate_delay[i][j].arm_perf, gate_delay[i][j].log_perf,
\r
269 // gate_delay[i][j].delay);
\r
274 static unsigned long dvfs_get_gate_delay(unsigned long arm_perf, unsigned long log_perf)
\r
277 for (i = 0; i < SIZE_VP_TABLE - 1; i++) {
\r
278 if (gate_delay[i][0].arm_perf == arm_perf)
\r
281 for (j = 0; j < SIZE_VP_TABLE - 1; j++) {
\r
282 if (gate_delay[i][j].log_perf == log_perf)
\r
286 //DVFS_DBG("%s index_arm=%d, index_log=%d, delay=%lu\n", __func__, i, j, gate_delay[i][j].delay);
\r
287 //DVFS_DBG("%s perf_arm=%d, perf_log=%d, delay=%lu\n",
\r
288 // __func__, gate_delay[i][j].arm_perf , gate_delay[i][j].log_perf , gate_delay[i][j].delay);
\r
289 return gate_delay[i][j].delay;
\r
291 static int dvfs_uoc_val_delay_init(void)
\r
294 for (i = 0; i < SIZE_VP_TABLE - 1; i++) {
\r
295 uoc_val_xx[i].volt = dvfs_vp_table[i].volt;
\r
296 uoc_val_xx[i].perf = dvfs_vp_table[i].perf;
\r
297 uoc_val_xx[i].uoc_val_01 = UOC_VAL_01 * 1000 / uoc_val_xx[i].perf;
\r
298 uoc_val_xx[i].uoc_val_11 = UOC_VAL_11 * 1000 / uoc_val_xx[i].perf;
\r
299 //DVFS_DBG("volt=%lu, perf=%lu, uoc_01=%lu, uoc_11=%lu\n", uoc_val_xx[i].volt, uoc_val_xx[i].perf,
\r
300 // uoc_val_xx[i].uoc_val_01, uoc_val_xx[i].uoc_val_11);
\r
304 static unsigned long dvfs_get_uoc_val_xx_by_volt(unsigned long uoc_val_xx_delay, unsigned long volt)
\r
307 if (uoc_val_xx_delay == UOC_VAL_01) {
\r
308 for (i = 0; i < SIZE_VP_TABLE - 1; i++) {
\r
309 if (uoc_val_xx[i].volt == volt)
\r
310 return uoc_val_xx[i].uoc_val_01;
\r
313 } else if (uoc_val_xx_delay == UOC_VAL_11) {
\r
314 for (i = 0; i < SIZE_VP_TABLE - 1; i++) {
\r
315 if (uoc_val_xx[i].volt == volt)
\r
316 return uoc_val_xx[i].uoc_val_11;
\r
320 DVFS_ERR("%s UNKNOWN uoc_val_xx\n", __func__);
\r
322 DVFS_ERR("%s can not find uoc_val_xx=%lu, with volt=%lu\n", __func__, uoc_val_xx_delay, volt);
\r
323 return uoc_val_xx_delay;
\r
/*
 * Result of one (ARM rate, LOG volt) UOC pre-calculation.
 * NOTE(review): the uoc_val member was lost in extraction; reconstructed
 * from its use in dvfs_with_uoc_init()/dvfs_get_uoc_val() — confirm.
 */
struct dvfs_volt_uoc {
        unsigned long volt_log;        /* candidate LOG voltage, uV */
        unsigned long volt_arm_new;    /* adjusted ARM voltage, uV */
        unsigned long volt_log_new;    /* adjusted LOG voltage, uV */
        int uoc_val;                   /* precomputed UOC field value */
};
\r
331 struct dvfs_uoc_val_table {
\r
332 unsigned long rate_arm;
\r
333 unsigned long volt_arm;
\r
334 struct dvfs_volt_uoc vu_list[SIZE_SUPPORT_LOG_VOLT];
\r
336 static struct dvfs_uoc_val_table dvfs_uoc_val_list[SIZE_ARM_FREQ_TABLE];
\r
338 static int dvfs_get_uoc_val_init(unsigned long *p_volt_arm_new, unsigned long *p_volt_log_new,
\r
339 unsigned long rate_khz);
\r
340 static int dvfs_with_uoc_init(void)
\r
342 struct cpufreq_frequency_table *dvfs_arm_table;
\r
343 struct clk *cpu_clk;
\r
345 unsigned long arm_volt_save = 0;
\r
346 cpu_clk = clk_get(NULL, "cpu");
\r
347 if (IS_ERR(cpu_clk))
\r
348 return PTR_ERR(cpu_clk);
\r
350 dvfs_arm_table = dvfs_get_freq_volt_table(cpu_clk);
\r
351 lpj_24m = CLK_LOOPS_RECALC(24 * MHZ);
\r
352 DVFS_DBG("24M=%lu cur_rate=%lu lpj=%lu\n", lpj_24m, arm_pll_clk->rate, loops_per_jiffy);
\r
353 dvfs_gate_delay_init();
\r
354 dvfs_uoc_val_delay_init();
\r
356 for (i = 0; dvfs_arm_table[i].frequency != CPUFREQ_TABLE_END; i++) {
\r
357 if (i > SIZE_ARM_FREQ_TABLE - 1) {
\r
358 DVFS_WARNING("mach-rk30/dvfs.c:%s:%d: dvfs arm table to large, use only [%d] frequency\n",
\r
359 __func__, __LINE__, SIZE_ARM_FREQ_TABLE);
\r
362 rate_cycle[i].rate_khz = dvfs_arm_table[i].frequency;
\r
363 rate_cycle[i].cycle_ns = (1000UL * VD_DELAY_ZOOM) / (rate_cycle[i].rate_khz / 1000);
\r
364 DVFS_DBG("%s: rate=%lu, cycle_ns=%lu\n",
\r
365 __func__, rate_cycle[i].rate_khz, rate_cycle[i].cycle_ns);
\r
367 size_dvfs_arm_table = i + 1;
\r
369 //dvfs_uoc_val_list[];
\r
370 for (i = 0; dvfs_arm_table[i].frequency != CPUFREQ_TABLE_END; i++) {
\r
371 dvfs_uoc_val_list[i].rate_arm = dvfs_arm_table[i].frequency;
\r
372 dvfs_uoc_val_list[i].volt_arm = dvfs_arm_table[i].index;
\r
373 arm_volt_save = dvfs_uoc_val_list[i].volt_arm;
\r
374 for (j = 0; j < SIZE_SUPPORT_LOG_VOLT - 1; j++) {
\r
375 dvfs_uoc_val_list[i].vu_list[j].volt_log = dvfs_volt_log_support_table[j];
\r
376 dvfs_uoc_val_list[i].vu_list[j].volt_arm_new = arm_volt_save;
\r
377 dvfs_uoc_val_list[i].vu_list[j].volt_log_new = dvfs_uoc_val_list[i].vu_list[j].volt_log;
\r
378 //DVFS_DBG("%s: Rarm=%lu,Varm=%lu,Vlog=%lu\n", __func__,
\r
379 // dvfs_uoc_val_list[i].rate_arm, dvfs_uoc_val_list[i].volt_arm,
\r
380 // dvfs_uoc_val_list[i].vu_list[j].volt_log);
\r
382 dvfs_uoc_val_list[i].vu_list[j].uoc_val = dvfs_get_uoc_val_init(
\r
383 &dvfs_uoc_val_list[i].vu_list[j].volt_arm_new,
\r
384 &dvfs_uoc_val_list[i].vu_list[j].volt_log_new,
\r
385 dvfs_uoc_val_list[i].rate_arm);
\r
386 DVFS_DBG("%s: Rarm=%lu,(Varm=%lu,Vlog=%lu)--->(Vn_arm=%lu,Vn_log=%lu), uoc=%d\n", __func__,
\r
387 dvfs_uoc_val_list[i].rate_arm, dvfs_uoc_val_list[i].volt_arm,
\r
388 dvfs_uoc_val_list[i].vu_list[j].volt_log,
\r
389 dvfs_uoc_val_list[i].vu_list[j].volt_arm_new,
\r
390 dvfs_uoc_val_list[i].vu_list[j].volt_log_new,
\r
391 dvfs_uoc_val_list[i].vu_list[j].uoc_val);
\r
398 arch_initcall(dvfs_with_uoc_init);
\r
400 static unsigned long dvfs_get_cycle_by_rate(unsigned long rate_khz)
\r
403 for (i = 0; i < size_dvfs_arm_table - 1; i++) {
\r
404 if (rate_khz == rate_cycle[i].rate_khz)
\r
405 return rate_cycle[i].cycle_ns;
\r
407 DVFS_ERR("%s, %d: can not find rate=%lu KHz in list\n", __func__, __LINE__, rate_khz);
\r
410 #define UOC_NEED_INCREASE_ARM 0
\r
411 #define UOC_NEED_INCREASE_LOG 1
\r
412 static unsigned long get_uoc_delay(unsigned long hold, unsigned long uoc_val_xx)
\r
414 // hold - uoc_val_11; make sure not smaller than UOC_VAL_MIN
\r
415 return hold > uoc_val_xx ? (hold - uoc_val_xx) : UOC_VAL_MIN;
\r
417 static unsigned long dvfs_recalc_volt(unsigned long *p_volt_arm_new, unsigned long *p_volt_log_new,
\r
418 unsigned long arm_perf, unsigned long log_perf,
\r
419 unsigned long hold, unsigned long setup, unsigned long flag)
\r
422 unsigned long volt_arm = *p_volt_arm_new, volt_log = *p_volt_log_new;
\r
423 unsigned long curr_delay = 0;
\r
424 unsigned long uoc_val_11 = dvfs_get_uoc_val_xx_by_volt(UOC_VAL_11, *p_volt_log_new);
\r
426 if (flag == UOC_NEED_INCREASE_LOG) {
\r
427 for (i = 0; i < ARRAY_SIZE(dvfs_volt_log_support_table); i++) {
\r
428 if (dvfs_volt_log_support_table[i] <= volt_log)
\r
431 volt_log = dvfs_volt_log_support_table[i];
\r
432 log_perf = dvfs_get_perf_byvolt(volt_log);
\r
433 uoc_val_11 = dvfs_get_uoc_val_xx_by_volt(UOC_VAL_11, volt_log);
\r
434 curr_delay = dvfs_get_gate_delay(arm_perf, log_perf);
\r
435 DVFS_DBG("\t%s line:%d get volt=%lu; arm_perf=%lu, log_perf=%lu, curr_delay=%lu\n",
\r
436 __func__, __LINE__, dvfs_volt_log_support_table[i],
\r
437 arm_perf, log_perf, curr_delay);
\r
438 if (curr_delay > get_uoc_delay(hold, uoc_val_11)) {
\r
439 *p_volt_log_new = volt_log;
\r
443 } else if (flag == UOC_NEED_INCREASE_ARM) {
\r
444 for (i = 0; i < ARRAY_SIZE(dvfs_volt_arm_support_table); i++) {
\r
445 if (dvfs_volt_arm_support_table[i] <= volt_arm)
\r
448 volt_arm = dvfs_volt_arm_support_table[i];
\r
449 arm_perf = dvfs_get_perf_byvolt(volt_arm);
\r
450 curr_delay = dvfs_get_gate_delay(arm_perf, log_perf);
\r
451 DVFS_DBG("\t%s line:%d get volt=%lu; arm_perf=%lu, log_perf=%lu, curr_delay=%lu\n",
\r
452 __func__, __LINE__, dvfs_volt_log_support_table[i],
\r
453 arm_perf, log_perf, curr_delay);
\r
454 if (curr_delay < setup) {
\r
455 *p_volt_arm_new = volt_arm;
\r
461 DVFS_ERR("Oops, some bugs here, %s Unknown flag:%08lx\n", __func__, flag);
\r
466 static int dvfs_get_uoc_val_init(unsigned long *p_volt_arm_new, unsigned long *p_volt_log_new, unsigned long rate_khz)
\r
469 unsigned long arm_perf = 0, log_perf = 0;
\r
470 unsigned long cycle = 0, hold = 0, setup = 0;
\r
471 unsigned long curr_delay = 0; // arm slow than log
\r
472 //unsigned long volt_arm_new = *p_volt_arm_new;
\r
473 //unsigned long volt_log_new = *p_volt_log_new;
\r
474 unsigned long uoc_val_01 , uoc_val_11;
\r
475 //unsigned long rate_MHz;
\r
476 //DVFS_DBG("enter %s\n", __func__);
\r
477 arm_perf = dvfs_get_perf_byvolt(*p_volt_arm_new);
\r
478 log_perf = dvfs_get_perf_byvolt(*p_volt_log_new);
\r
479 uoc_val_01 = dvfs_get_uoc_val_xx_by_volt(UOC_VAL_01, *p_volt_log_new);
\r
480 uoc_val_11 = dvfs_get_uoc_val_xx_by_volt(UOC_VAL_11, *p_volt_log_new);
\r
481 DVFS_DBG("%s volt:arm(%lu), log(%lu);\tget perf arm(%lu), log(%lu)\n", __func__,
\r
482 *p_volt_arm_new, *p_volt_log_new, arm_perf, log_perf);
\r
484 // warning: this place may cause div 0 warning, DO NOT take place
\r
485 // rate_MHz with (rate / DVFS_MHZ)
\r
486 // rate_MHz = rate_khz / 1000;
\r
487 // cycle = (1000UL * VD_DELAY_ZOOM) / (rate_khz / 1000); // ns = 1 / rate(GHz), Magnified 1000 times
\r
488 cycle = dvfs_get_cycle_by_rate(rate_khz);
\r
490 hold = cycle * L2_HOLD / 100UL;
\r
491 setup = cycle * L2_SETUP / 100UL;
\r
493 curr_delay = dvfs_get_gate_delay(arm_perf, log_perf);
\r
494 DVFS_DBG("%s cycle=%lu, curr_delay=%lu, (hold=%lu, setup=%lu)\n",
\r
495 __func__, cycle, curr_delay, hold, setup);
\r
497 if (curr_delay <= get_uoc_delay(hold, uoc_val_11)) {
\r
498 DVFS_DBG("%s Need to increase log voltage\n", __func__);
\r
499 curr_delay = dvfs_recalc_volt(p_volt_arm_new, p_volt_log_new, arm_perf, log_perf,
\r
500 hold, setup, UOC_NEED_INCREASE_LOG);
\r
502 //log_perf = dvfs_get_perf_byvolt(*p_volt_log_new);
\r
503 uoc_val_01 = dvfs_get_uoc_val_xx_by_volt(UOC_VAL_01, *p_volt_log_new);
\r
504 uoc_val_11 = dvfs_get_uoc_val_xx_by_volt(UOC_VAL_11, *p_volt_log_new);
\r
506 } else if (curr_delay >= setup) {
\r
507 DVFS_DBG("%s Need to increase arm voltage\n", __func__);
\r
508 curr_delay = dvfs_recalc_volt(p_volt_arm_new, p_volt_log_new, arm_perf, log_perf,
\r
509 hold, setup, UOC_NEED_INCREASE_ARM);
\r
510 //arm_perf = dvfs_get_perf_byvolt(*p_volt_arm_new);
\r
513 DVFS_DBG("TARGET VOLT:arm(%lu), log(%lu);\tget perf arm(%lu), log(%lu)\n",
\r
514 *p_volt_arm_new, *p_volt_log_new,
\r
515 dvfs_get_perf_byvolt(*p_volt_arm_new), dvfs_get_perf_byvolt(*p_volt_log_new));
\r
516 // update uoc_val_01/11 with new volt
\r
517 DVFS_DBG("cycle=%lu, hold-val11=%lu, hold-val01=%lu, (hold=%lu, setup=%lu), curr_delay=%lu\n",
\r
518 cycle, get_uoc_delay(hold, uoc_val_11), get_uoc_delay(hold, uoc_val_01),
\r
519 hold, setup, curr_delay);
\r
520 if (curr_delay > hold && curr_delay < setup)
\r
522 else if (curr_delay <= hold && curr_delay > get_uoc_delay(hold, uoc_val_01))
\r
524 else if (curr_delay <= get_uoc_delay(hold, uoc_val_01) && curr_delay > get_uoc_delay(hold, uoc_val_11))
\r
527 DVFS_DBG("%s curr_delay=%lu, uoc_val=%d\n", __func__, curr_delay, uoc_val);
\r
531 static int dvfs_get_uoc_val(unsigned long *p_volt_arm_new, unsigned long *p_volt_log_new,
\r
532 unsigned long rate_khz)
\r
535 for (i = 0; i < size_dvfs_arm_table; i++) {
\r
536 if (dvfs_uoc_val_list[i].rate_arm != rate_khz)
\r
538 for (j = 0; j < SIZE_SUPPORT_LOG_VOLT - 1; j++) {
\r
539 if (dvfs_uoc_val_list[i].vu_list[j].volt_log < *p_volt_log_new)
\r
541 *p_volt_arm_new = dvfs_uoc_val_list[i].vu_list[j].volt_arm_new;
\r
542 *p_volt_log_new = dvfs_uoc_val_list[i].vu_list[j].volt_log_new;
\r
543 DVFS_DBG("%s: Varm_set=%lu, Vlog_set=%lu, uoc=%d\n", __func__,
\r
544 *p_volt_arm_new, *p_volt_log_new,
\r
545 dvfs_uoc_val_list[i].vu_list[j].uoc_val);
\r
546 return dvfs_uoc_val_list[i].vu_list[j].uoc_val;
\r
549 DVFS_ERR("%s: can not get uoc_val(Va=%lu, Vl=%lu, Ra=%lu)\n", __func__,
\r
550 *p_volt_arm_new, *p_volt_log_new, rate_khz);
\r
553 static int dvfs_set_uoc_val(int uoc_val)
\r
555 DVFS_DBG("%s set UOC = %d\n", __func__, uoc_val);
\r
557 ((readl_relaxed(RK30_GRF_BASE + GRF_UOC3_CON0) | (3 << (12 + 16)))
\r
558 & (~(3 << 12))) | (uoc_val << 12), RK30_GRF_BASE + GRF_UOC3_CON0);
\r
560 DVFS_DBG("read UOC=0x%08x\n", readl_relaxed(RK30_GRF_BASE + GRF_UOC3_CON0));
\r
564 static int target_set_rate(struct clk_node *dvfs_clk, unsigned long rate_new)
\r
568 if (dvfs_clk->clk_dvfs_target) {
\r
569 ret = dvfs_clk->clk_dvfs_target(dvfs_clk->clk, rate_new, clk_set_rate_locked);
\r
571 ret = clk_set_rate_locked(dvfs_clk->clk, rate_new);
\r
574 dvfs_clk->set_freq = rate_new / 1000;
\r
578 static int dvfs_balance_volt(unsigned long volt_arm_old, unsigned long volt_log_old)
\r
581 if (volt_arm_old > volt_log_old)
\r
582 ret = dvfs_scale_volt_direct(&vd_core, volt_arm_old);
\r
583 if (volt_arm_old < volt_log_old)
\r
584 ret = dvfs_scale_volt_direct(&vd_cpu, volt_log_old);
\r
586 DVFS_ERR("%s error, volt_arm_old=%lu, volt_log_old=%lu\n", __func__, volt_arm_old, volt_log_old);
\r
590 // use 24M to switch uoc bits
\r
591 static int uoc_pre = 0;
\r
592 static int dvfs_scale_volt_rate_with_uoc(
\r
593 unsigned long volt_arm_new, unsigned long volt_log_new,
\r
594 unsigned long volt_arm_old, unsigned long volt_log_old,
\r
595 unsigned long rate_arm_new)
\r
598 unsigned int axi_div = 0x0;
\r
599 unsigned long flags, lpj_save;
\r
600 DVFS_DBG("Va_new=%lu uV, Vl_new=%lu uV;(was Va_old=%lu uV, Vl_old=%lu uV); Ra_new=%luHz\n",
\r
601 volt_arm_new, volt_log_new, volt_arm_old, volt_log_old,
\r
603 axi_div = readl_relaxed(RK30_CRU_BASE + CRU_CLKSELS_CON(1));
\r
604 uoc_val = dvfs_get_uoc_val(&volt_arm_new, &volt_log_new, rate_arm_new);
\r
605 if (uoc_val == uoc_pre) {
\r
606 dvfs_scale_volt_bystep(&vd_cpu, &vd_core, volt_arm_new, volt_log_new,
\r
607 100 * 1000, 100 * 1000,
\r
608 volt_arm_new > volt_log_new ? (volt_arm_new - volt_log_new) : 0,
\r
609 volt_log_new > volt_arm_new ? (volt_log_new - volt_arm_new) : 0);
\r
612 //local_irq_save(flags);
\r
615 t[0] = readl_relaxed(RK30_TIMER1_BASE + 4);
\r
616 lpj_save = loops_per_jiffy;
\r
619 writel_relaxed(PLL_MODE_SLOW(APLL_ID), RK30_CRU_BASE + CRU_MODE_CON);
\r
620 loops_per_jiffy = lpj_24m;
\r
623 arm_pll_clk->rate = arm_pll_clk->recalc(arm_pll_clk);
\r
625 //cpu_axi parent to apll
\r
626 //writel_relaxed(0x00200000, RK30_CRU_BASE + CRU_CLKSELS_CON(0));
\r
627 clk_set_parent_nolock(clk_cpu_div, arm_pll_clk);
\r
629 //set axi/ahb/apb to 1:1:1
\r
630 writel_relaxed(axi_div & (~(0x3 << 0)) & (~(0x3 << 8)) & (~(0x3 << 12)), RK30_CRU_BASE + CRU_CLKSELS_CON(1));
\r
632 t[1] = readl_relaxed(RK30_TIMER1_BASE + 4);
\r
633 /*********************/
\r
634 //balance voltage before set UOC bits
\r
635 dvfs_balance_volt(volt_arm_old, volt_log_old);
\r
636 t[2] = readl_relaxed(RK30_TIMER1_BASE + 4);
\r
639 dvfs_set_uoc_val(uoc_val);
\r
640 t[3] = readl_relaxed(RK30_TIMER1_BASE + 4);
\r
643 dvfs_scale_volt_bystep(&vd_cpu, &vd_core, volt_arm_new, volt_log_new,
\r
644 100 * 1000, 100 * 1000,
\r
645 volt_arm_new > volt_log_new ? (volt_arm_new - volt_log_new) : 0,
\r
646 volt_log_new > volt_arm_new ? (volt_log_new - volt_arm_new) : 0);
\r
647 t[4] = readl_relaxed(RK30_TIMER1_BASE + 4);
\r
649 /*********************/
\r
650 //set axi/ahb/apb to default
\r
651 writel_relaxed(axi_div, RK30_CRU_BASE + CRU_CLKSELS_CON(1));
\r
653 //cpu_axi parent to gpll
\r
654 //writel_relaxed(0x00200020, RK30_CRU_BASE + CRU_CLKSELS_CON(0));
\r
655 clk_set_parent_nolock(clk_cpu_div, general_pll_clk);
\r
658 writel_relaxed(PLL_MODE_NORM(APLL_ID), RK30_CRU_BASE + CRU_MODE_CON);
\r
659 loops_per_jiffy = lpj_save;
\r
662 arm_pll_clk->rate = arm_pll_clk->recalc(arm_pll_clk);
\r
664 t[5] = readl_relaxed(RK30_TIMER1_BASE + 4);
\r
666 //local_irq_restore(flags);
\r
667 DVFS_DBG(KERN_DEBUG "T %d %d %d %d %d\n", t[0] - t[1], t[1] - t[2], t[2] - t[3], t[3] - t[4], t[4] - t[5]);
\r
673 // use 312M to switch uoc bits
\r
674 static int uoc_pre = 0;
\r
675 static int dvfs_scale_volt_rate_with_uoc(
\r
676 unsigned long volt_arm_new, unsigned long volt_log_new,
\r
677 unsigned long volt_arm_old, unsigned long volt_log_old,
\r
678 unsigned long rate_arm_new)
\r
681 unsigned long arm_freq = 0;
\r
682 uoc_val = dvfs_get_uoc_val(&volt_arm_new, &volt_log_new, rate_arm_new);
\r
683 DVFS_DBG("Va_new=%lu uV, Vl_new=%lu uV;(was Va_old=%lu uV, Vl_old=%lu uV); Ra_new=%luHz, uoc=%d\n",
\r
684 volt_arm_new, volt_log_new, volt_arm_old, volt_log_old, rate_arm_new, uoc_val);
\r
685 if (uoc_val == uoc_pre) {
\r
686 dvfs_scale_volt_bystep(&vd_cpu, &vd_core, volt_arm_new, volt_log_new,
\r
687 100 * 1000, 100 * 1000,
\r
688 volt_arm_new > volt_log_new ? (volt_arm_new - volt_log_new) : 0,
\r
689 volt_log_new > volt_arm_new ? (volt_log_new - volt_arm_new) : 0);
\r
692 arm_freq = clk_get_rate(clk_cpu);
\r
693 target_set_rate(dvfs_clk_cpu, 312 * MHZ);
\r
695 //cpu_axi parent to apll
\r
696 //writel_relaxed(0x00200000, RK30_CRU_BASE + CRU_CLKSELS_CON(0));
\r
697 clk_set_parent_nolock(clk_cpu_div, arm_pll_clk);
\r
699 /*********************/
\r
700 //balance voltage before set UOC bits
\r
701 dvfs_balance_volt(volt_arm_old, volt_log_old);
\r
704 dvfs_set_uoc_val(uoc_val);
\r
707 dvfs_scale_volt_bystep(&vd_cpu, &vd_core, volt_arm_new, volt_log_new,
\r
708 100 * 1000, 100 * 1000,
\r
709 volt_arm_new > volt_log_new ? (volt_arm_new - volt_log_new) : 0,
\r
710 volt_log_new > volt_arm_new ? (volt_log_new - volt_arm_new) : 0);
\r
712 /*********************/
\r
713 //cpu_axi parent to gpll
\r
714 //writel_relaxed(0x00200020, RK30_CRU_BASE + CRU_CLKSELS_CON(0));
\r
715 clk_set_parent_nolock(clk_cpu_div, general_pll_clk);
\r
717 //reset arm freq as normal freq
\r
718 target_set_rate(dvfs_clk_cpu, arm_freq);
\r
725 int dvfs_target_cpu(struct clk *clk, unsigned long rate_hz)
\r
727 struct clk_node *dvfs_clk;
\r
729 int volt_new = 0, volt_dep_new = 0, volt_old = 0, volt_dep_old = 0;
\r
730 struct cpufreq_frequency_table clk_fv;
\r
731 unsigned long rate_new, rate_old;
\r
734 DVFS_ERR("%s is not a clk\n", __func__);
\r
737 dvfs_clk = clk_get_dvfs_info(clk);
\r
738 DVFS_DBG("enter %s: clk(%s) rate = %lu Hz\n", __func__, dvfs_clk->name, rate_hz);
\r
740 if (!dvfs_clk || dvfs_clk->vd == NULL || IS_ERR_OR_NULL(dvfs_clk->vd->regulator)) {
\r
741 DVFS_ERR("dvfs(%s) is not register regulator\n", dvfs_clk->name);
\r
745 if (dvfs_clk->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
\r
746 /* It means the last time set voltage error */
\r
747 ret = dvfs_reset_volt(dvfs_clk->vd);
\r
753 /* Check limit rate */
\r
754 if (rate_hz < dvfs_clk->min_rate) {
\r
755 rate_hz = dvfs_clk->min_rate;
\r
756 } else if (rate_hz > dvfs_clk->max_rate) {
\r
757 rate_hz = dvfs_clk->max_rate;
\r
760 /* need round rate */
\r
761 volt_old = vd_cpu.cur_volt;
\r
762 volt_dep_old = vd_core.cur_volt;
\r
764 rate_old = clk_get_rate(clk);
\r
765 rate_new = clk_round_rate_nolock(clk, rate_hz);
\r
766 if(rate_new == rate_old)
\r
769 DVFS_DBG("dvfs(%s) round rate (%lu)(rount %lu) old (%lu)\n",
\r
770 dvfs_clk->name, rate_hz, rate_new, rate_old);
\r
772 /* find the clk corresponding voltage */
\r
773 if (0 != dvfs_clk_get_ref_volt(dvfs_clk, rate_new / 1000, &clk_fv)) {
\r
774 DVFS_ERR("dvfs(%s) rate %luhz is larger,not support\n", dvfs_clk->name, rate_hz);
\r
777 dvfs_clk->set_volt = clk_fv.index;
\r
780 volt_new = dvfs_vd_get_newvolt_byclk(dvfs_clk);
\r
781 volt_dep_new = dvfs_vd_get_newvolt_bypd(&vd_core);
\r
783 if (volt_dep_new <= 0)
\r
784 goto fail_roll_back;
\r
786 if (rate_new < rate_old)
\r
787 target_set_rate(dvfs_clk, rate_new);
\r
789 dvfs_scale_volt_rate_with_uoc(volt_new, volt_dep_new, volt_old, volt_dep_old,
\r
792 if (rate_new > rate_old)
\r
793 target_set_rate(dvfs_clk, rate_new);
\r
796 DVFS_DBG("UOC VOLT OK\n");
\r
800 //dvfs_clk = clk_get_rate(dvfs_clk->clk);
\r
804 int dvfs_target_core(struct clk *clk, unsigned long rate_hz)
\r
806 struct clk_node *dvfs_clk;
\r
808 int volt_new = 0, volt_dep_new = 0, volt_old = 0, volt_dep_old = 0;
\r
809 struct cpufreq_frequency_table clk_fv;
\r
810 unsigned long rate_new, rate_old;
\r
813 DVFS_ERR("%s is not a clk\n", __func__);
\r
816 dvfs_clk = clk_get_dvfs_info(clk);
\r
817 DVFS_DBG("enter %s: clk(%s) rate = %lu Hz\n", __func__, dvfs_clk->name, rate_hz);
\r
819 if (!dvfs_clk || dvfs_clk->vd == NULL || IS_ERR_OR_NULL(dvfs_clk->vd->regulator)) {
\r
820 DVFS_ERR("dvfs(%s) is not register regulator\n", dvfs_clk->name);
\r
824 if (dvfs_clk->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
\r
825 /* It means the last time set voltage error */
\r
826 ret = dvfs_reset_volt(dvfs_clk->vd);
\r
832 /* Check limit rate */
\r
833 if (rate_hz < dvfs_clk->min_rate) {
\r
834 rate_hz = dvfs_clk->min_rate;
\r
835 } else if (rate_hz > dvfs_clk->max_rate) {
\r
836 rate_hz = dvfs_clk->max_rate;
\r
839 /* need round rate */
\r
840 volt_old = vd_cpu.cur_volt;
\r
841 volt_dep_old = vd_core.cur_volt;
\r
843 rate_old = clk_get_rate(clk);
\r
844 rate_new = clk_round_rate_nolock(clk, rate_hz);
\r
845 if(rate_new == rate_old)
\r
848 DVFS_DBG("dvfs(%s) round rate (%lu)(rount %lu) old (%lu)\n",
\r
849 dvfs_clk->name, rate_hz, rate_new, rate_old);
\r
851 /* find the clk corresponding voltage */
\r
852 if (0 != dvfs_clk_get_ref_volt(dvfs_clk, rate_new / 1000, &clk_fv)) {
\r
853 DVFS_ERR("dvfs(%s) rate %luhz is larger,not support\n", dvfs_clk->name, rate_hz);
\r
856 dvfs_clk->set_volt = clk_fv.index;
\r
858 // target arm:volt_new/old, log:volt_dep_new/old
\r
859 volt_dep_new = dvfs_vd_get_newvolt_byclk(dvfs_clk);
\r
860 volt_new = dvfs_vd_get_newvolt_bypd(&vd_cpu);
\r
862 if (volt_dep_new <= 0)
\r
863 goto fail_roll_back;
\r
865 if (rate_new < rate_old)
\r
866 target_set_rate(dvfs_clk, rate_new);
\r
868 dvfs_scale_volt_rate_with_uoc(volt_new, volt_dep_new, volt_old, volt_dep_old,
\r
869 dvfs_clk_cpu->set_freq);
\r
871 if (rate_new > rate_old)
\r
872 target_set_rate(dvfs_clk, rate_new);
\r
874 DVFS_DBG("UOC VOLT OK\n");
\r
878 //dvfs_clk = clk_get_rate(dvfs_clk->clk);
\r
882 /*****************************init**************************/
\r
884 * rate must be raising sequence
\r
886 static struct cpufreq_frequency_table cpu_dvfs_table[] = {
\r
887 // {.frequency = 48 * DVFS_KHZ, .index = 920*DVFS_MV},
\r
888 // {.frequency = 126 * DVFS_KHZ, .index = 970 * DVFS_MV},
\r
889 // {.frequency = 252 * DVFS_KHZ, .index = 1040 * DVFS_MV},
\r
890 // {.frequency = 504 * DVFS_KHZ, .index = 1050 * DVFS_MV},
\r
891 {.frequency = 816 * DVFS_KHZ, .index = 1050 * DVFS_MV},
\r
892 // {.frequency = 1008 * DVFS_KHZ, .index = 1100 * DVFS_MV},
\r
893 {.frequency = CPUFREQ_TABLE_END},
\r
896 static struct cpufreq_frequency_table ddr_dvfs_table[] = {
\r
897 // {.frequency = 100 * DVFS_KHZ, .index = 1100 * DVFS_MV},
\r
898 {.frequency = 200 * DVFS_KHZ, .index = 1000 * DVFS_MV},
\r
899 {.frequency = 300 * DVFS_KHZ, .index = 1050 * DVFS_MV},
\r
900 {.frequency = 400 * DVFS_KHZ, .index = 1100 * DVFS_MV},
\r
901 {.frequency = 500 * DVFS_KHZ, .index = 1150 * DVFS_MV},
\r
902 {.frequency = 600 * DVFS_KHZ, .index = 1200 * DVFS_MV},
\r
903 {.frequency = CPUFREQ_TABLE_END},
\r
906 static struct cpufreq_frequency_table gpu_dvfs_table[] = {
\r
907 {.frequency = 90 * DVFS_KHZ, .index = 1100 * DVFS_MV},
\r
908 {.frequency = 180 * DVFS_KHZ, .index = 1150 * DVFS_MV},
\r
909 {.frequency = 300 * DVFS_KHZ, .index = 1100 * DVFS_MV},
\r
910 {.frequency = 400 * DVFS_KHZ, .index = 1150 * DVFS_MV},
\r
911 {.frequency = 500 * DVFS_KHZ, .index = 1200 * DVFS_MV},
\r
912 {.frequency = CPUFREQ_TABLE_END},
\r
915 static struct cpufreq_frequency_table peri_aclk_dvfs_table[] = {
\r
916 {.frequency = 100 * DVFS_KHZ, .index = 1000 * DVFS_MV},
\r
917 {.frequency = 200 * DVFS_KHZ, .index = 1050 * DVFS_MV},
\r
918 {.frequency = 300 * DVFS_KHZ, .index = 1070 * DVFS_MV},
\r
919 {.frequency = 500 * DVFS_KHZ, .index = 1100 * DVFS_MV},
\r
920 {.frequency = CPUFREQ_TABLE_END},
\r
923 static struct cpufreq_frequency_table dep_cpu2core_table[] = {
\r
924 // {.frequency = 252 * DVFS_KHZ, .index = 1025 * DVFS_MV},
\r
925 // {.frequency = 504 * DVFS_KHZ, .index = 1025 * DVFS_MV},
\r
926 {.frequency = 816 * DVFS_KHZ, .index = 1050 * DVFS_MV},//logic 1.050V
\r
927 // {.frequency = 1008 * DVFS_KHZ,.index = 1050 * DVFS_MV},
\r
928 // {.frequency = 1200 * DVFS_KHZ,.index = 1050 * DVFS_MV},
\r
929 // {.frequency = 1272 * DVFS_KHZ,.index = 1050 * DVFS_MV},//logic 1.050V
\r
930 // {.frequency = 1416 * DVFS_KHZ,.index = 1100 * DVFS_MV},//logic 1.100V
\r
931 // {.frequency = 1512 * DVFS_KHZ,.index = 1125 * DVFS_MV},//logic 1.125V
\r
932 // {.frequency = 1608 * DVFS_KHZ,.index = 1175 * DVFS_MV},//logic 1.175V
\r
933 {.frequency = CPUFREQ_TABLE_END},
\r
936 static struct vd_node vd_cpu = {
\r
938 .regulator_name = "vdd_cpu",
\r
939 .volt_set_flag = DVFS_SET_VOLT_FAILURE,
\r
940 .vd_dvfs_target = dvfs_target_cpu,
\r
943 static struct vd_node vd_core = {
\r
945 .regulator_name = "vdd_core",
\r
946 .volt_set_flag = DVFS_SET_VOLT_FAILURE,
\r
947 .vd_dvfs_target = dvfs_target_core,
\r
950 static struct vd_node vd_rtc = {
\r
952 .regulator_name = "vdd_rtc",
\r
953 .volt_set_flag = DVFS_SET_VOLT_FAILURE,
\r
954 .vd_dvfs_target = NULL,
\r
957 static struct vd_node *rk30_vds[] = {&vd_cpu, &vd_core, &vd_rtc};
\r
959 static struct pd_node pd_a9_0 = {
\r
963 static struct pd_node pd_a9_1 = {
\r
967 static struct pd_node pd_debug = {
\r
968 .name = "pd_debug",
\r
971 static struct pd_node pd_scu = {
\r
975 static struct pd_node pd_video = {
\r
976 .name = "pd_video",
\r
979 static struct pd_node pd_vio = {
\r
983 static struct pd_node pd_gpu = {
\r
987 static struct pd_node pd_peri = {
\r
991 static struct pd_node pd_cpu = {
\r
995 static struct pd_node pd_alive = {
\r
996 .name = "pd_alive",
\r
999 static struct pd_node pd_rtc = {
\r
/*
 * Build a pd_node_lookup initializer from a pd_node pointer.
 * NOTE(review): macro body was lost in the corrupted source; restored as a
 * single .pd designated initializer — confirm the member name upstream.
 */
#define LOOKUP_PD(_ppd) \
{ \
	.pd	= _ppd, \
}
1007 static struct pd_node_lookup rk30_pds[] = {
\r
1008 LOOKUP_PD(&pd_a9_0),
\r
1009 LOOKUP_PD(&pd_a9_1),
\r
1010 LOOKUP_PD(&pd_debug),
\r
1011 LOOKUP_PD(&pd_scu),
\r
1012 LOOKUP_PD(&pd_video),
\r
1013 LOOKUP_PD(&pd_vio),
\r
1014 LOOKUP_PD(&pd_gpu),
\r
1015 LOOKUP_PD(&pd_peri),
\r
1016 LOOKUP_PD(&pd_cpu),
\r
1017 LOOKUP_PD(&pd_alive),
\r
1018 LOOKUP_PD(&pd_rtc),
\r
/*
 * Build a pds_list entry from a pd_node pointer (NULL marks list end).
 * NOTE(review): macro body was lost in the corrupted source; restored as a
 * single .pd designated initializer — confirm the member name upstream.
 */
#define CLK_PDS(_ppd) \
{ \
	.pd	= _ppd, \
}
1026 static struct pds_list cpu_pds[] = {
\r
1027 CLK_PDS(&pd_a9_0),
\r
1028 CLK_PDS(&pd_a9_1),
\r
1032 static struct pds_list ddr_pds[] = {
\r
1037 static struct pds_list gpu_pds[] = {
\r
1042 static struct pds_list aclk_periph_pds[] = {
\r
1043 CLK_PDS(&pd_peri),
\r
/*
 * Build a clk_node initializer binding a clock name to its PD list, its
 * frequency/voltage table and its DVFS notifier block.
 * NOTE(review): the opening brace, .pds line and closing brace were lost in
 * the corrupted source; .pds restored from the _ppds parameter — confirm the
 * member name upstream.
 */
#define RK_CLKS(_clk_name, _ppds, _dvfs_table, _dvfs_nb) \
{ \
	.name		= _clk_name, \
	.pds		= _ppds, \
	.dvfs_table	= _dvfs_table, \
	.dvfs_nb	= _dvfs_nb, \
}
1055 static struct clk_node rk30_clks[] = {
\r
1056 RK_CLKS("cpu", cpu_pds, cpu_dvfs_table, &rk_dvfs_clk_notifier),
\r
1057 RK_CLKS("ddr", ddr_pds, ddr_dvfs_table, &rk_dvfs_clk_notifier),
\r
1058 RK_CLKS("gpu", gpu_pds, gpu_dvfs_table, &rk_dvfs_clk_notifier),
\r
1059 RK_CLKS("aclk_periph", aclk_periph_pds, peri_aclk_dvfs_table, &rk_dvfs_clk_notifier),
\r
/*
 * Build a depend_lookup initializer: clock _clk_name forces voltage domain
 * _pvd to the levels in _dep_table.
 * NOTE(review): the braces and the voltage-domain member line were lost in
 * the corrupted source; restored as .dep_vd from the _pvd parameter —
 * confirm the member name upstream.
 */
#define RK_DEPPENDS(_clk_name, _pvd, _dep_table) \
{ \
	.clk_name	= _clk_name, \
	.dep_vd		= _pvd, \
	.dep_table	= _dep_table, \
}
1069 static struct depend_lookup rk30_depends[] = {
\r
1070 #ifndef CONFIG_ARCH_RK3066B
\r
1071 RK_DEPPENDS("cpu", &vd_core, dep_cpu2core_table),
\r
1073 //RK_DEPPENDS("gpu", &vd_cpu, NULL),
\r
1074 //RK_DEPPENDS("gpu", &vd_cpu, NULL),
\r
1076 static struct avs_ctr_st rk30_avs_ctr;
\r
1078 int rk_dvfs_init(void)
\r
1081 for (i = 0; i < ARRAY_SIZE(rk30_vds); i++) {
\r
1082 rk_regist_vd(rk30_vds[i]);
\r
1084 for (i = 0; i < ARRAY_SIZE(rk30_pds); i++) {
\r
1085 rk_regist_pd(&rk30_pds[i]);
\r
1087 for (i = 0; i < ARRAY_SIZE(rk30_clks); i++) {
\r
1088 rk_regist_clk(&rk30_clks[i]);
\r
1090 for (i = 0; i < ARRAY_SIZE(rk30_depends); i++) {
\r
1091 rk_regist_depends(&rk30_depends[i]);
\r
1093 dvfs_clk_cpu = dvfs_get_dvfs_clk_byname("cpu");
\r
1094 clk_cpu = clk_get(NULL, "cpu");
\r
1095 if (IS_ERR_OR_NULL(clk_cpu)) {
\r
1096 DVFS_ERR("%s get clk_cpu error\n", __func__);
\r
1100 clk_cpu_div = clk_get(NULL, "logic");
\r
1101 if (IS_ERR_OR_NULL(clk_cpu_div)) {
\r
1102 DVFS_ERR("%s get clk_cpu_div error\n", __func__);
\r
1106 arm_pll_clk = clk_get(NULL, "arm_pll");
\r
1107 if (IS_ERR_OR_NULL(arm_pll_clk)) {
\r
1108 DVFS_ERR("%s get arm_pll_clk error\n", __func__);
\r
1112 general_pll_clk = clk_get(NULL, "general_pll");
\r
1113 if (IS_ERR_OR_NULL(general_pll_clk)) {
\r
1114 DVFS_ERR("%s get general_pll_clk error\n", __func__);
\r
1118 avs_board_init(&rk30_avs_ctr);
\r
1119 DVFS_DBG("rk30_dvfs_init\n");
\r
/******************************rk30 avs**************************************************/
\r
1127 #ifdef CONFIG_ARCH_RK3066B
\r
1129 static void __iomem *rk30_nandc_base = NULL;
\r
/* Read a NANDC register at byte offset `offset` (relaxed: no barrier). */
#define nandc_readl(offset) readl_relaxed(rk30_nandc_base + offset)
/* Write a NANDC register, then dsb() so the write posts before continuing. */
#define nandc_writel(v, offset) do { writel_relaxed(v, rk30_nandc_base + offset); dsb(); } while (0)
1133 static u8 rk30_get_avs_val(void)
\r
1135 u32 nanc_save_reg[4];
\r
1136 unsigned long flags;
\r
1139 if(rk30_nandc_base == NULL)
\r
1142 preempt_disable();
\r
1143 local_irq_save(flags);
\r
1145 nanc_save_reg[0] = nandc_readl(0);
\r
1146 nanc_save_reg[1] = nandc_readl(0x130);
\r
1147 nanc_save_reg[2] = nandc_readl(0x134);
\r
1148 nanc_save_reg[3] = nandc_readl(0x158);
\r
1150 nandc_writel(nanc_save_reg[0] | 0x1 << 14, 0);
\r
1151 nandc_writel(0x5, 0x130);
\r
1153 /* Just break lock status */
\r
1154 nandc_writel(0x1, 0x158);
\r
1155 nandc_writel(3, 0x158);
\r
1156 nandc_writel(1, 0x134);
\r
1159 paramet = nandc_readl(0x138);
\r
1160 if((paramet & 0x1))
\r
1164 paramet = (paramet >> 1) & 0xff;
\r
1165 nandc_writel(nanc_save_reg[0], 0);
\r
1166 nandc_writel(nanc_save_reg[1], 0x130);
\r
1167 nandc_writel(nanc_save_reg[2], 0x134);
\r
1168 nandc_writel(nanc_save_reg[3], 0x158);
\r
1170 local_irq_restore(flags);
\r
1172 return (u8)paramet;
\r
1176 void rk30_avs_init(void)
\r
1178 rk30_nandc_base = ioremap(RK30_NANDC_PHYS, RK30_NANDC_SIZE);
\r
1180 static struct avs_ctr_st rk30_avs_ctr = {
\r
1181 .avs_init = rk30_avs_init,
\r
1182 .avs_get_val = rk30_get_avs_val,
\r