1 /* arch/arm/mach-rk30/rk30_dvfs.c
3 * Copyright (C) 2012 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
/* Chip thermal-ADC read helper, implemented elsewhere in the kernel tree. */
25 extern int rockchip_tsadc_get_temp(int chn);
27 #define MHz (1000 * 1000)
/* All registered voltage domains; traversal guarded by rk_dvfs_mutex. */
28 static LIST_HEAD(rk_dvfs_tree);
29 static DEFINE_MUTEX(rk_dvfs_mutex);
/* Workqueue that runs the periodic temperature-limit work item. */
30 static struct workqueue_struct *dvfs_wq;
/* Cached DVFS node for the CPU clock ("clk_core"), set in dvfs_init(). */
31 static struct dvfs_node *clk_cpu_dvfs_node;
/* Throttle target in degrees C; DT property "target-temp" overrides it. */
32 static unsigned int target_temp = 80;
/* Non-zero enables temperature throttling; DT "temp-limit-enable" overrides. */
33 static int temp_limit_enable = 1;
/*
 * dvfs_volt_up_delay() - wait for the regulator output to settle after a
 * voltage *increase*; returns immediately when the voltage is not rising.
 * Prefers the regulator-reported ramp time; falls back to a heuristic
 * ((delta_uV) >> 9 microseconds) when the driver cannot report one.
 * NOTE(review): braces/else lines are elided in this listing — the exact
 * branch structure (e.g. when the warning fires) cannot be confirmed here.
 */
35 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
39 if(new_volt <= old_volt)
41 if(vd->volt_time_flag > 0)
42 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
45 if(u_time < 0) {// regulator is not suported time,useing default time
46 DVFS_DBG("%s:vd %s is not suported getting delay time,so we use default\n",
48 u_time = ((new_volt) - (old_volt)) >> 9;
51 DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n",
52 __func__, vd->name, old_volt, new_volt, u_time);
/* Split the wait: whole milliseconds via mdelay, remainder via udelay. */
55 mdelay(u_time / 1000);
56 udelay(u_time % 1000);
57 DVFS_WARNING("%s: regulator set vol delay is larger 1ms,old is %d,new is %d\n",
58 __func__, old_volt, new_volt);
/*
 * dvfs_regulator_set_voltage_readback() - set the regulator voltage and, on
 * apparent failure, read the voltage back to distinguish "set failed but
 * actually took effect" from a genuine failure.
 * NOTE(review): @min_uV is never used — both set_voltage arguments are
 * @max_uV.  Looks intentional (callers pass the same value twice) but
 * confirm against the original tree.
 */
64 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
66 int ret = 0, read_back = 0;
68 ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
70 DVFS_ERR("%s: now read back to check voltage\n", __func__);
72 /* read back to judge if it is already effect */
74 read_back = dvfs_regulator_get_voltage(regulator);
75 if (read_back == max_uV) {
76 DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
79 DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
/*
 * dvfs_scale_volt_direct() - program @vd_clk's regulator to @volt_new in one
 * step, wait for ramp-up when rising, and update cur_volt/volt_set_flag to
 * reflect success or failure.
 * NOTE(review): error-return lines are elided in this listing.
 */
86 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
90 DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
92 if (IS_ERR_OR_NULL(vd_clk)) {
93 DVFS_ERR("%s: vd_node error\n", __func__);
97 if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
98 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
99 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
/* On failure, remember it so the next dvfs_target() resyncs via dvfs_reset_volt(). */
101 vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
102 DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
103 __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
108 DVFS_ERR("%s: invalid regulator\n", __func__);
112 vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
113 vd_clk->cur_volt = volt_new;
/*
 * dvfs_reset_volt() - after a failed voltage set, re-read the actual
 * regulator output and resynchronize the domain's cur_volt with hardware.
 * Returns the refreshed cur_volt; an elided path presumably returns an
 * error when the read-back is invalid (<= 0).
 */
119 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
121 int flag_set_volt_correct = 0;
122 if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
123 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
125 DVFS_ERR("%s: invalid regulator\n", __func__);
128 if (flag_set_volt_correct <= 0) {
129 DVFS_ERR("%s (vd:%s), try to reload volt ,by it is error again(%d)!!! stop scaling\n",
130 __func__, dvfs_vd->name, flag_set_volt_correct);
133 dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
134 DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
135 __func__, dvfs_vd->name, flag_set_volt_correct);
137 /* Reset vd's voltage */
138 dvfs_vd->cur_volt = flag_set_volt_correct;
140 return dvfs_vd->cur_volt;
144 // for clk enable case to get vd regulator info
/*
 * clk_enable_dvfs_regulator_check() - snapshot the regulator's current
 * output into vd->cur_volt and mark the domain's set-flag accordingly
 * (<= 0 read means the regulator is not usable yet).
 */
145 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
147 vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
148 if(vd->cur_volt <= 0){
149 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
151 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/*
 * dvfs_get_vd_regulator_volt_list() - cache the regulator's selectable
 * voltages into vd->volt_list (capped at VD_VOL_LIST_CNT entries),
 * skipping invalid (<= 0) entries.  An elided line presumably stores the
 * final count into vd->n_voltages, which the round helpers below iterate.
 */
154 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
156 unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
157 int n = 0, sel_volt = 0;
159 if(selector > VD_VOL_LIST_CNT)
160 selector = VD_VOL_LIST_CNT;
162 for (i = 0; i < selector; i++) {
163 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
165 //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
166 // __func__, vd->name, i, sel_volt);
169 vd->volt_list[n++] = sel_volt;
170 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n",
171 __func__, vd->name, i, n, sel_volt);
/*
 * vd_regulator_round_volt_max() - round @volt to a supported list voltage.
 * By its name (and the _min sibling below, which returns volt_list[i-1]),
 * this presumably returns the largest list entry <= @volt — the selection
 * and return lines are elided from this listing, so confirm upstream.
 */
178 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
183 for (i = 0; i < vd->n_voltages; i++) {
184 sel_volt = vd->volt_list[i];
186 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
/*
 * vd_regulator_round_volt_min() - round @volt up to a supported list
 * voltage: scan volt_list and (per the visible return) yield the entry
 * just below the first list voltage that exceeds the comparison — the
 * comparison line itself is elided from this listing.
 */
197 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
202 for (i = 0; i < vd->n_voltages; i++) {
203 sel_volt = vd->volt_list[i];
205 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
211 return vd->volt_list[i-1];
/*
 * vd_regulator_round_volt() - dispatch to the _min or _max rounding helper
 * based on @flags (VD_LIST_RELATION_L selects the _min variant, anything
 * else the _max variant).
 */
221 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
225 if(flags == VD_LIST_RELATION_L)
226 return vd_regulator_round_volt_min(vd, volt);
228 return vd_regulator_round_volt_max(vd, volt);
/*
 * dvfs_table_round_volt() - rewrite every voltage (.index field) in the
 * node's freq/volt table to the nearest value the regulator can actually
 * produce (rounded with VD_LIST_RELATION_H).  No-op if the table, vd, or
 * regulator is missing.
 */
231 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
235 if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd ||
236 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
239 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
/* In this driver, cpufreq table .index is reused to hold the voltage in uV. */
241 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
244 DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
245 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
248 DVFS_DBG("clk %s:round_volt %d to %d\n",
249 clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
251 clk_dvfs_node->dvfs_table[i].index=test_volt;
/*
 * dvfs_vd_get_regulator_volt_time_info() - probe once whether the regulator
 * driver can report ramp time, using a sample 200mV rise; caches the result
 * in vd->volt_time_flag (negative = unsupported, used by dvfs_volt_up_delay
 * to choose the fallback heuristic).
 */
255 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
257 if(vd->volt_time_flag <= 0){// check regulator support get uping vol timer
258 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
259 if(vd->volt_time_flag < 0){
260 DVFS_DBG("%s,vd %s volt_time is no support\n",
264 DVFS_DBG("%s,vd %s volt_time is support,up 200mv need delay %d us\n",
265 __func__, vd->name, vd->volt_time_flag);
/*
 * dvfs_vd_get_regulator_mode_info() - probe whether the regulator supports
 * mode get/set.  Reads the current mode, validates it against the known
 * REGULATOR_MODE_* values, then writes it back as a round-trip test;
 * vd->mode_flag is reset to 0 on failure so the probe is retried later.
 */
270 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
272 //REGULATOR_MODE_FAST
273 if(vd->mode_flag <= 0){// check regulator support get uping vol timer{
274 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
275 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
276 || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
278 if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
279 vd->mode_flag = 0;// check again
282 if(vd->mode_flag > 0){
283 DVFS_DBG("%s,vd %s mode(now is %d) support\n",
284 __func__, vd->name, vd->mode_flag);
287 DVFS_DBG("%s,vd %s mode is not support now check\n",
/*
 * dvfs_get_regulator() - look up a voltage domain's regulator handle by its
 * regulator name.  Walks rk_dvfs_tree under rk_dvfs_mutex; an elided tail
 * presumably returns NULL when no domain matches.
 */
294 struct regulator *dvfs_get_regulator(char *regulator_name)
298 mutex_lock(&rk_dvfs_mutex);
299 list_for_each_entry(vd, &rk_dvfs_tree, node) {
300 if (strcmp(regulator_name, vd->regulator_name) == 0) {
301 mutex_unlock(&rk_dvfs_mutex);
302 return vd->regulator;
305 mutex_unlock(&rk_dvfs_mutex);
/*
 * dvfs_get_rate_range() - derive min_rate/max_rate (in Hz) from the node's
 * freq table.  Table frequencies are kHz with flag bits in the low three
 * decimal digits (see dvfs_table_round_clk_rate), so "freq/1000*1000*1000"
 * strips the flags and converts kHz -> Hz.  The visible max assignment runs
 * every iteration; the min assignment's guard (presumably i == 0) is elided.
 */
309 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
311 struct cpufreq_frequency_table *table;
317 clk_dvfs_node->min_rate = 0;
318 clk_dvfs_node->max_rate = 0;
320 table = clk_dvfs_node->dvfs_table;
321 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
322 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
324 clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
327 DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
328 __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * dvfs_table_round_clk_rate() - snap every table frequency to a rate the
 * clock can generate.  Table entries encode "real kHz rate + flag digits":
 * the low 3 decimal digits are flags (used by the DDR clock), the rest is
 * the rate in MHz-multiples of kHz.  Each rate is rounded via the clock
 * framework, rounded *up* to a whole MHz, then re-combined with its flags.
 */
333 static void dvfs_table_round_clk_rate(struct dvfs_node *clk_dvfs_node)
335 int i, rate, temp_rate, flags;
337 if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
340 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
341 //ddr rate = real rate+flags
342 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
343 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
344 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
346 DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
347 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
351 /* Set rate unit as MHZ */
352 if (temp_rate % MHz != 0)
353 temp_rate = (temp_rate / MHz + 1) * MHz;
/* Back to kHz and re-attach the flag digits stripped above. */
355 temp_rate = (temp_rate / 1000) + flags;
357 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
358 clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
360 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;
/*
 * clk_dvfs_node_get_ref_volt() - find the first table entry whose frequency
 * is >= @rate_khz and copy it into @clk_fv (frequency + voltage-in-.index).
 * rate_khz == 0 or a missing table yields clk_fv->frequency = rate_khz with
 * an (elided) early return; falling off the table end clears clk_fv and
 * presumably returns an error.
 */
364 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
365 struct cpufreq_frequency_table *clk_fv)
369 if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
373 clk_fv->frequency = rate_khz;
376 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
377 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
378 clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
379 clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
380 //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
381 //clk_fv->frequency, clk_fv->index);
385 clk_fv->frequency = 0;
387 //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
/*
 * dvfs_pd_get_newvolt_byclk() - compute the voltage a power domain needs.
 * Fast path: if the changed clock is enabled and already demands at least
 * the domain's current voltage, its set_volt wins.  Otherwise take the max
 * set_volt across all enabled clocks in the domain (returned via an elided
 * "return volt_max" tail).
 */
391 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
395 if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
396 return clk_dvfs_node->set_volt;
399 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
400 if (clk_dvfs_node->enable_count)
401 volt_max = max(volt_max, clk_dvfs_node->set_volt);
/*
 * dvfs_update_clk_pds_volt() - refresh the parent power domain's cur_volt
 * to match the demand implied by @clk_dvfs_node's new set_volt.
 */
406 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
413 pd = clk_dvfs_node->pd;
417 pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
/*
 * dvfs_vd_get_newvolt_bypd() - a voltage domain must satisfy its most
 * demanding power domain: take the max cur_volt over vd->pd_list
 * (returned via an elided "return volt_max_vd" tail).
 */
420 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
428 list_for_each_entry(pd, &vd->pd_list, node) {
429 volt_max_vd = max(volt_max_vd, pd->cur_volt);
/*
 * dvfs_vd_get_newvolt_byclk() - propagate a clock's new voltage demand up
 * through its power domain, then return the resulting voltage the whole
 * voltage domain requires.
 */
435 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
440 dvfs_update_clk_pds_volt(clk_dvfs_node);
441 return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
/*
 * dvfs_temp_limit_work_func() (variant 1) - periodic (100ms) work that
 * walks every registered clock with a temp_limit_table, samples its
 * temperature channel and re-runs the DVFS target at last_set_rate so the
 * limit is re-applied.
 * NOTE(review): a second definition of this same function follows below;
 * an `#if 0/#else` (or similar) guard was almost certainly elided from this
 * listing — confirm against the original tree before editing either copy.
 */
444 static void dvfs_temp_limit_work_func(struct work_struct *work)
446 unsigned long delay = HZ / 10; // 100ms
449 struct dvfs_node *clk_dvfs_node;
/* Re-arm first so the poll keeps running regardless of what happens below. */
451 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
453 mutex_lock(&rk_dvfs_mutex);
454 list_for_each_entry(vd, &rk_dvfs_tree, node) {
455 mutex_lock(&vd->mutex);
456 list_for_each_entry(pd, &vd->pd_list, node) {
457 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
458 if (clk_dvfs_node->temp_limit_table) {
459 clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
460 clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
464 mutex_unlock(&vd->mutex);
466 mutex_unlock(&rk_dvfs_mutex);
/*
 * dvfs_temp_limit_work_func() (variant 2) - CPU-only thermal throttle, run
 * every 100ms on CPU0.  Two policies:
 *  - PERFORMANCE: hard caps from per_temp_limit_table — pick the lowest cap
 *    whose trip temperature (.index) is exceeded.
 *  - NORMAL: proportional control around target_temp using
 *    nor_temp_limit_table as a (delta-temp -> rate-step) map; step the cap
 *    down while too hot and rising, step it back up (clamped to max_rate)
 *    while below the cap and cool enough.
 * The cap is applied by re-running dvfs_clk_set_rate at last_set_rate,
 * which funnels through dvfs_get_limit_rate().
 */
470 static void dvfs_temp_limit_work_func(struct work_struct *work)
472 int temp=0, delta_temp=0;
473 unsigned long delay = HZ/10;
474 unsigned long arm_rate_step=0;
475 static int old_temp=0;
/* Re-arm first so the poll keeps running regardless of what happens below. */
478 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
480 temp = rockchip_tsadc_get_temp(1);
/* An elided branch presumably discards implausible jumps using delta_temp. */
483 delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
487 if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
488 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
492 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
493 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
494 if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
495 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
498 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
499 } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
500 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
504 if (temp > target_temp) {
/* Only throttle further while the temperature is still rising. */
505 if (temp > old_temp) {
506 delta_temp = temp - target_temp;
507 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
508 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
509 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
512 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
513 clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
514 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
518 if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
519 delta_temp = target_temp - temp;
520 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
521 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
522 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
527 clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
528 if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
529 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
531 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
537 DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
/* Delayed-work item bound to the function above; first queued in dvfs_init(). */
541 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
/*
 * dvfs_clk_enable_limit() - clamp a DVFS clock to [@min_rate, @max_rate].
 * Re-derives the table-based range first, enables freq_limit_en, narrows
 * min/max only when the requested bounds fall inside the table range, then
 * immediately re-targets the clock at its last set rate (or current
 * hardware rate if none) so the new limits take effect.
 * Returns 0 on success; error paths are elided from this listing.
 */
544 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
546 u32 rate = 0, ret = 0;
548 if (!clk_dvfs_node || (min_rate > max_rate))
551 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
552 mutex_lock(&clk_dvfs_node->vd->mutex);
554 /* To reset clk_dvfs_node->min_rate/max_rate */
555 dvfs_get_rate_range(clk_dvfs_node);
556 clk_dvfs_node->freq_limit_en = 1;
558 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
559 clk_dvfs_node->min_rate = min_rate;
562 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
563 clk_dvfs_node->max_rate = max_rate;
566 if (clk_dvfs_node->last_set_rate == 0)
567 rate = __clk_get_rate(clk_dvfs_node->clk);
569 rate = clk_dvfs_node->last_set_rate;
570 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
572 mutex_unlock(&clk_dvfs_node->vd->mutex);
576 DVFS_DBG("%s:clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
577 __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate,
578 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * dvfs_clk_disable_limit() - drop any caller-imposed rate clamp: restore
 * the table-derived min/max range, clear freq_limit_en, and re-target the
 * clock at last_set_rate so it may return to its unrestricted rate.
 */
584 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
591 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
592 mutex_lock(&clk_dvfs_node->vd->mutex);
594 /* To reset clk_dvfs_node->min_rate/max_rate */
595 dvfs_get_rate_range(clk_dvfs_node);
596 clk_dvfs_node->freq_limit_en = 0;
597 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
599 mutex_unlock(&clk_dvfs_node->vd->mutex);
602 DVFS_DBG("%s: clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
603 __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
608 void dvfs_disable_temp_limit(void) {
609 temp_limit_enable = 0;
610 cancel_delayed_work_sync(&dvfs_temp_limit_work);
/*
 * dvfs_clk_get_limit() - read back the node's current [min, max] rate clamp
 * under the domain mutex.  Returns the freq_limit_en flag (non-zero when a
 * limit is active); an elided guard presumably rejects a NULL node.
 */
613 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
620 mutex_lock(&clk_dvfs_node->vd->mutex);
622 *min_rate = clk_dvfs_node->min_rate;
623 *max_rate = clk_dvfs_node->max_rate;
624 freq_limit_en = clk_dvfs_node->freq_limit_en;
626 mutex_unlock(&clk_dvfs_node->vd->mutex);
628 return freq_limit_en;
630 EXPORT_SYMBOL(dvfs_clk_get_limit);
/*
 * dvfs_clk_register_set_rate_callback() - install a custom rate-setting
 * hook; when present, dvfs_target() calls it instead of clk_set_rate().
 * Stored under the domain mutex; returns 0 via an elided tail.
 */
632 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
637 mutex_lock(&clk_dvfs_node->vd->mutex);
638 clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
639 mutex_unlock(&clk_dvfs_node->vd->mutex);
643 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
/*
 * dvfs_get_freq_volt_table() - return the node's freq/volt table pointer
 * (read under the domain mutex).  Caller must not free it; the table is
 * owned by the DVFS core.
 */
645 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node)
647 struct cpufreq_frequency_table *table;
652 mutex_lock(&clk_dvfs_node->vd->mutex);
653 table = clk_dvfs_node->dvfs_table;
654 mutex_unlock(&clk_dvfs_node->vd->mutex);
658 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
/*
 * dvfs_set_freq_volt_table() - replace the node's freq/volt table and
 * immediately re-derive the rate range, snap frequencies to achievable
 * clock rates, and snap voltages to regulator-supported values.
 * Rejects a NULL/error table pointer.
 */
660 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
665 if (IS_ERR_OR_NULL(table)){
666 DVFS_ERR("%s:invalid table!\n", __func__);
670 mutex_lock(&clk_dvfs_node->vd->mutex);
671 clk_dvfs_node->dvfs_table = table;
672 dvfs_get_rate_range(clk_dvfs_node);
673 dvfs_table_round_clk_rate(clk_dvfs_node);
674 dvfs_table_round_volt(clk_dvfs_node);
675 mutex_unlock(&clk_dvfs_node->vd->mutex);
679 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
/*
 * clk_enable_dvfs() - enable DVFS management of a clock node.
 * First enable (enable_count == 0): lazily acquires the domain regulator,
 * snapshots its voltage/volt-list/ramp-time info, rounds the freq table to
 * achievable rates and voltages, reads the current clock rate, looks up its
 * required voltage, optionally registers a clk notifier, and raises the
 * domain voltage if the current output is below requirement.  Subsequent
 * calls just bump enable_count.  Returns 0 on success; error paths reset
 * enable_count and unlock before the (elided) error returns.
 * NOTE(review): many brace/return lines are elided in this listing; the
 * per-branch control flow below is inferred and should be checked upstream.
 */
681 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
683 struct cpufreq_frequency_table clk_fv;
689 DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n",
690 __func__, __clk_get_name(clk_dvfs_node->clk));
692 if (!clk_dvfs_node->vd) {
693 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n",
694 __func__, clk_dvfs_node->name);
697 mutex_lock(&clk_dvfs_node->vd->mutex);
698 if (clk_dvfs_node->enable_count == 0) {
/* Lazily bind the regulator on first enable of any clock in the domain. */
699 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
700 if (clk_dvfs_node->vd->regulator_name)
701 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
702 if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
703 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
704 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
705 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
706 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
707 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
709 clk_dvfs_node->enable_count = 0;
710 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n",
711 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
712 mutex_unlock(&clk_dvfs_node->vd->mutex);
716 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
719 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
720 __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
722 dvfs_table_round_clk_rate(clk_dvfs_node);
723 dvfs_get_rate_range(clk_dvfs_node);
724 clk_dvfs_node->freq_limit_en = 1;
725 dvfs_table_round_volt(clk_dvfs_node);
726 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
727 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
729 DVFS_DBG("%s: %s get freq %u!\n",
730 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
/* Non-zero return: current rate has no table entry at or above it. */
732 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
733 if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
734 DVFS_ERR("%s: table empty\n", __func__);
735 clk_dvfs_node->enable_count = 0;
736 mutex_unlock(&clk_dvfs_node->vd->mutex);
739 DVFS_WARNING("%s: clk(%s) freq table all value are smaller than default(%d), use default, just enable dvfs\n",
740 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
741 clk_dvfs_node->enable_count++;
742 mutex_unlock(&clk_dvfs_node->vd->mutex);
746 clk_dvfs_node->enable_count++;
747 clk_dvfs_node->set_volt = clk_fv.index;
748 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
749 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
750 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
752 if (clk_dvfs_node->dvfs_nb) {
753 // must unregister when clk disable
754 clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
/* Bring the rail up to the demand implied by the current rate. */
757 if(clk_dvfs_node->vd->cur_volt != volt_new) {
759 ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
760 dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
762 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
763 clk_dvfs_node->enable_count = 0;
764 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
765 mutex_unlock(&clk_dvfs_node->vd->mutex);
768 clk_dvfs_node->vd->cur_volt = volt_new;
769 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
773 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
774 __func__, clk_dvfs_node->enable_count);
775 clk_dvfs_node->enable_count++;
778 mutex_unlock(&clk_dvfs_node->vd->mutex);
782 EXPORT_SYMBOL(clk_enable_dvfs);
/*
 * clk_disable_dvfs() - drop one DVFS enable reference.  When the count hits
 * zero, recompute the domain voltage without this clock's demand, scale the
 * rail accordingly, and unregister the clk notifier (registered in
 * clk_enable_dvfs).  Warns (and presumably returns early via an elided
 * line) if already disabled.
 */
784 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
791 DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n",
792 __func__, __clk_get_name(clk_dvfs_node->clk));
794 mutex_lock(&clk_dvfs_node->vd->mutex);
795 if (!clk_dvfs_node->enable_count) {
796 DVFS_WARNING("%s:clk(%s) is already closed!\n",
797 __func__, __clk_get_name(clk_dvfs_node->clk));
798 mutex_unlock(&clk_dvfs_node->vd->mutex);
801 clk_dvfs_node->enable_count--;
802 if (0 == clk_dvfs_node->enable_count) {
803 DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
804 __func__, __clk_get_name(clk_dvfs_node->clk));
805 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
806 dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
809 clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
810 DVFS_DBG("clk unregister nb!\n");
814 mutex_unlock(&clk_dvfs_node->vd->mutex);
817 EXPORT_SYMBOL(clk_disable_dvfs);
/*
 * dvfs_get_limit_rate() - clamp a requested rate to the active limits:
 * the [min_rate, max_rate] clamp when freq_limit_en is set, then the
 * thermal cap (temp_limit_rate) when temperature limiting is enabled.
 * Returns the clamped rate via an elided "return limit_rate" tail.
 */
819 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
821 unsigned long limit_rate;
824 if (clk_dvfs_node->freq_limit_en) {
826 if (rate < clk_dvfs_node->min_rate) {
827 limit_rate = clk_dvfs_node->min_rate;
828 } else if (rate > clk_dvfs_node->max_rate) {
829 limit_rate = clk_dvfs_node->max_rate;
831 if (temp_limit_enable) {
832 if (limit_rate > clk_dvfs_node->temp_limit_rate) {
833 limit_rate = clk_dvfs_node->temp_limit_rate;
838 DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
/*
 * dvfs_target() - the core rate-change path (installed as vd_dvfs_target).
 * Sequence: refuse if the node is disabled; resync voltage if the previous
 * set failed; clamp the request via dvfs_get_limit_rate(); look up the
 * required voltage for the rounded rate; then order the operations safely —
 * raise voltage BEFORE the clock when speeding up, lower it AFTER when
 * slowing down.  On a failed rate set, set_volt is restored from
 * clk_volt_store (visible at the elided error tail).
 * Called with the domain mutex held by the dvfs_clk_set_rate() wrapper.
 */
843 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
845 struct cpufreq_frequency_table clk_fv;
846 unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
847 struct clk *clk = clk_dvfs_node->clk;
853 if (!clk_dvfs_node->enable_count){
854 DVFS_WARNING("%s:dvfs(%s) is disable\n",
855 __func__, clk_dvfs_node->name);
859 if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
860 /* It means the last time set voltage error */
861 ret = dvfs_reset_volt(clk_dvfs_node->vd);
867 rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
868 new_rate = __clk_round_rate(clk, rate);
869 old_rate = __clk_get_rate(clk);
/* Nothing to do when the rounded target equals the current rate. */
870 if (new_rate == old_rate)
873 DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
874 __func__, clk_dvfs_node->name, rate, old_rate);
876 /* find the clk corresponding voltage */
877 ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
879 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
880 __func__, clk_dvfs_node->name, new_rate);
/* Remember the old demand so it can be restored if the rate set fails. */
883 clk_volt_store = clk_dvfs_node->set_volt;
884 clk_dvfs_node->set_volt = clk_fv.index;
885 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
886 DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
887 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
890 if (new_rate > old_rate) {
891 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
897 if (clk_dvfs_node->clk_dvfs_target) {
898 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
900 ret = clk_set_rate(clk, rate);
904 DVFS_ERR("%s:clk(%s) set rate err\n",
905 __func__, __clk_get_name(clk));
908 clk_dvfs_node->set_freq = new_rate / 1000;
910 DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
911 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
913 /* if down the rate */
914 if (new_rate < old_rate) {
915 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
922 clk_dvfs_node->set_volt = clk_volt_store;
927 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
929 return __clk_round_rate(clk_dvfs_node->clk, rate);
931 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
933 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
935 return __clk_get_rate(clk_dvfs_node->clk);
937 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
939 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
941 unsigned long last_set_rate;
943 mutex_lock(&clk_dvfs_node->vd->mutex);
944 last_set_rate = clk_dvfs_node->last_set_rate;
945 mutex_unlock(&clk_dvfs_node->vd->mutex);
947 return last_set_rate;
949 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
952 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
954 return clk_enable(clk_dvfs_node->clk);
956 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
958 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
960 return clk_disable(clk_dvfs_node->clk);
962 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
/*
 * clk_get_dvfs_node() - find a registered DVFS node by clock name.  Walks
 * the full vd -> pd -> clk hierarchy under the global and per-domain
 * mutexes; an elided tail presumably returns NULL when nothing matches.
 */
964 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
968 struct dvfs_node *clk_dvfs_node;
970 mutex_lock(&rk_dvfs_mutex);
971 list_for_each_entry(vd, &rk_dvfs_tree, node) {
972 mutex_lock(&vd->mutex);
973 list_for_each_entry(pd, &vd->pd_list, node) {
974 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
975 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
976 mutex_unlock(&vd->mutex);
977 mutex_unlock(&rk_dvfs_mutex);
978 return clk_dvfs_node;
982 mutex_unlock(&vd->mutex);
984 mutex_unlock(&rk_dvfs_mutex);
988 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
/*
 * clk_put_dvfs_node() - release a node obtained from clk_get_dvfs_node().
 * DVFS nodes live for the lifetime of the system, so this is a no-op kept
 * for API symmetry with clk_get_dvfs_node().
 * NOTE(review): the body was elided in the source listing; reconstructed
 * as empty based on the lack of any per-node refcounting elsewhere in this
 * file — confirm against the original tree.
 */
void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
{
}
EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
996 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
998 return clk_prepare_enable(clk_dvfs_node->clk);
1000 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1003 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1005 clk_disable_unprepare(clk_dvfs_node->clk);
1007 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
/*
 * dvfs_clk_set_rate() - public entry point for changing a DVFS-managed
 * clock's rate.  Takes the domain mutex, delegates to vd_dvfs_target
 * (dvfs_target) and records the *requested* rate as last_set_rate so the
 * thermal/limit machinery can re-apply it later.  Errors if the node has
 * no domain or target callback.
 */
1009 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1016 DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n",
1017 __func__, clk_dvfs_node->name, rate);
1019 #if 0 // judge by reference func in rk
1020 if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1021 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1026 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1027 mutex_lock(&clk_dvfs_node->vd->mutex);
1028 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1029 clk_dvfs_node->last_set_rate = rate;
1030 mutex_unlock(&clk_dvfs_node->vd->mutex);
1032 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
1033 __func__, clk_dvfs_node->name);
1038 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
/*
 * rk_regist_vd() - register a voltage domain: reset its ramp-time probe
 * flag, initialise its power-domain list, and link it into the global
 * rk_dvfs_tree under rk_dvfs_mutex.  Returns 0 via an elided tail.
 */
1041 int rk_regist_vd(struct vd_node *vd)
1047 vd->volt_time_flag=0;
1049 INIT_LIST_HEAD(&vd->pd_list);
1050 mutex_lock(&rk_dvfs_mutex);
1051 list_add(&vd->node, &rk_dvfs_tree);
1052 mutex_unlock(&rk_dvfs_mutex);
1056 EXPORT_SYMBOL_GPL(rk_regist_vd);
/*
 * rk_regist_pd() - register a power domain under its parent voltage domain
 * (pd->vd, resolved on an elided line): initialise its clock list and link
 * it into vd->pd_list under the domain mutex.
 */
1058 int rk_regist_pd(struct pd_node *pd)
1069 INIT_LIST_HEAD(&pd->clk_list);
1070 mutex_lock(&vd->mutex);
1071 list_add(&pd->node, &vd->pd_list);
1072 mutex_unlock(&vd->mutex);
1076 EXPORT_SYMBOL_GPL(rk_regist_pd);
/*
 * rk_regist_clk() - register a clock node under its power domain: link it
 * into pd->clk_list under the parent voltage domain's mutex.
 */
1078 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1086 vd = clk_dvfs_node->vd;
1087 pd = clk_dvfs_node->pd;
1091 mutex_lock(&vd->mutex);
1092 list_add(&clk_dvfs_node->node, &pd->clk_list);
1093 mutex_unlock(&vd->mutex);
1097 EXPORT_SYMBOL_GPL(rk_regist_clk);
/*
 * rk_convert_cpufreq_table() - fill each table entry's .index with the OPP
 * voltage for its frequency (this driver overloads cpufreq .index to carry
 * the voltage in uV).  Returns the PTR_ERR of a missing OPP; success path
 * (return 0) is elided.
 */
1099 static int rk_convert_cpufreq_table(struct dvfs_node *dvfs_node)
1103 struct cpufreq_frequency_table *table;
1106 table = dvfs_node->dvfs_table;
1107 dev = &dvfs_node->dev;
1109 for (i = 0; table[i].frequency!= CPUFREQ_TABLE_END; i++){
/* Table frequency is kHz; OPPs are keyed in Hz. */
1110 opp = opp_find_freq_exact(dev, table[i].frequency * 1000, true);
1112 return PTR_ERR(opp);
1113 table[i].index = opp_get_voltage(opp);
/*
 * of_get_temp_limit_table() - parse a DT property of u32 pairs
 * (temperature, rate-kHz) into a kzalloc'd cpufreq table terminated with
 * CPUFREQ_TABLE_END.  .index holds the temperature threshold, .frequency
 * the rate converted kHz -> Hz (*1000).  Returns NULL when the property is
 * absent (elided early-return) or malformed; caller owns the allocation
 * and never frees it (tables live for the system lifetime).
 */
1118 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1120 struct cpufreq_frequency_table *temp_limt_table = NULL;
1121 const struct property *prop;
1125 prop = of_find_property(dev_node, propname, NULL);
1131 nr = prop->length / sizeof(u32);
/* Entries come in pairs; an odd count (checked on an elided line) is invalid. */
1133 pr_err("%s: Invalid freq list\n", __func__);
1137 temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1138 (nr/2 + 1), GFP_KERNEL);
1142 for (i=0; i<nr/2; i++){
1143 temp_limt_table[i].index = be32_to_cpup(val++);
1144 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
1147 temp_limt_table[i].index = 0;
1148 temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1150 return temp_limt_table;
/*
 * of_dvfs_init() - build the whole vd/pd/clk DVFS tree from the device
 * tree.  Reads the top-level "dvfs" node's target-temp / temp-limit-enable
 * properties, then for each child level: allocates and registers a vd
 * (with regulator name and dvfs_target as its target callback), its pd
 * children, and each pd's clk children — resolving per-clock temperature
 * tables, OPP-derived freq/volt tables and the struct clk handle.
 * NOTE(review): allocation-failure and `continue`/cleanup lines are elided
 * in this listing; error handling below is only partially visible.
 */
1154 int of_dvfs_init(void)
1158 struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
1159 struct dvfs_node *dvfs_node;
1164 DVFS_DBG("%s\n", __func__);
1166 dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
1167 if (IS_ERR_OR_NULL(dvfs_dev_node)) {
1168 DVFS_ERR("%s get dvfs dev node err\n", __func__);
1169 return PTR_ERR(dvfs_dev_node);
1172 val = of_get_property(dvfs_dev_node, "target-temp", NULL);
1174 target_temp = be32_to_cpup(val);
1177 val = of_get_property(dvfs_dev_node, "temp-limit-enable", NULL);
1179 temp_limit_enable = be32_to_cpup(val);
1182 for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
1183 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
1187 mutex_init(&vd->mutex);
1188 vd->name = vd_dev_node->name;
1189 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
1191 DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n",
1192 __func__, vd_dev_node->name, ret);
1197 vd->suspend_volt = 0;
/* Marked FAILURE until clk_enable_dvfs() reads the real regulator output. */
1199 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1200 vd->vd_dvfs_target = dvfs_target;
1201 ret = rk_regist_vd(vd);
1203 DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
1208 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n",
1209 __func__, vd->name, vd->regulator_name, vd->suspend_volt);
1211 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {
1212 pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
1217 pd->name = pd_dev_node->name;
1219 ret = rk_regist_pd(pd);
1221 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
1225 DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
1226 __func__, pd->name, vd->name);
1227 for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
1228 if (!of_device_is_available(clk_dev_node))
1231 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
1235 dvfs_node->name = clk_dev_node->name;
1238 if (temp_limit_enable) {
1239 val = of_get_property(clk_dev_node, "temp-channel", NULL);
1241 dvfs_node->temp_channel = be32_to_cpup(val);
1243 dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
1244 dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
/* -1 == no thermal cap (unsigned: effectively "unlimited"). */
1246 dvfs_node->temp_limit_rate = -1;
1247 dvfs_node->dev.of_node = clk_dev_node;
1248 ret = of_init_opp_table(&dvfs_node->dev);
1250 DVFS_ERR("%s:clk(%s) get opp table err:%d\n", __func__, dvfs_node->name, ret);
1255 ret = opp_init_cpufreq_table(&dvfs_node->dev, &dvfs_node->dvfs_table);
1257 DVFS_ERR("%s:clk(%s) get cpufreq table err:%d\n", __func__, dvfs_node->name, ret);
1261 ret = rk_convert_cpufreq_table(dvfs_node);
1267 clk = clk_get(NULL, clk_dev_node->name);
1269 DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
1275 dvfs_node->clk = clk;
1276 ret = rk_regist_clk(dvfs_node);
1278 DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
1282 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n",
1283 __func__, clk_dev_node->name, pd->name);
1291 /*********************************************************************************/
1293 * dump_dbg_map() : Draw all informations of dvfs while debug
/*
 * dump_dbg_map() - print the whole DVFS tree (domains, power domains,
 * clocks, limits and freq/volt tables) to the kernel log.  Despite the
 * @buf parameter and sysfs-show caller, output goes through printk, not
 * @buf; the (elided) return value is what dvfs_tree_show() passes back.
 */
1295 static int dump_dbg_map(char *buf)
1300 struct dvfs_node *clk_dvfs_node;
1303 mutex_lock(&rk_dvfs_mutex);
1304 printk( "-------------DVFS TREE-----------\n\n\n");
1305 printk( "DVFS TREE:\n");
1307 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1308 mutex_lock(&vd->mutex);
1309 printk( "|\n|- voltage domain:%s\n", vd->name);
1310 printk( "|- current voltage:%d\n", vd->cur_volt);
1312 list_for_each_entry(pd, &vd->pd_list, node) {
1313 printk( "| |\n| |- power domain:%s, status = %s, current volt = %d\n",
1314 pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt);
1316 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1317 printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
1318 " enable_dvfs = %s\n",
1319 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
1320 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
1321 printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %u\n",
1322 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
1323 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
1324 clk_dvfs_node->last_set_rate/1000);
1326 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1327 printk( "| | | |- freq = %d, volt = %d\n",
1328 clk_dvfs_node->dvfs_table[i].frequency,
1329 clk_dvfs_node->dvfs_table[i].index);
1334 mutex_unlock(&vd->mutex);
1337 printk( "-------------DVFS TREE END------------\n");
1338 mutex_unlock(&rk_dvfs_mutex);
1343 /*********************************************************************************/
/* sysfs kobject for the /sys/dvfs directory created in dvfs_init(). */
1344 static struct kobject *dvfs_kobj;
/* Attribute wrapper pairing a sysfs attribute with its show/store ops. */
1345 struct dvfs_attribute {
1346 struct attribute attr;
1347 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
1349 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
1350 const char *buf, size_t n);
/*
 * dvfs_tree_store() - sysfs store hook for the dvfs_tree attribute; the
 * body is elided in this listing (presumably a no-op returning @n, since
 * the tree is not writable).
 */
1353 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
1354 const char *buf, size_t n)
/*
 * dvfs_tree_show() - sysfs show hook: dumps the full DVFS tree (via
 * printk inside dump_dbg_map) and returns its result as the read length.
 */
1358 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
1361 return dump_dbg_map(buf);
/* All sysfs attributes installed under /sys/dvfs by dvfs_init(). */
1365 static struct dvfs_attribute dvfs_attrs[] = {
1366 /* node_name permision show_func store_func */
1367 //#ifdef CONFIG_RK_CLOCK_PROC
1368 __ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR, dvfs_tree_show, dvfs_tree_store),
/*
 * dvfs_init() - late_initcall: create /sys/dvfs with its attributes, and —
 * when temperature limiting is enabled — resolve the CPU DVFS node
 * ("clk_core"), seed its thermal cap at max_rate, create the ordered
 * single-threaded workqueue and kick off the periodic temperature work.
 */
1372 static int __init dvfs_init(void)
1376 dvfs_kobj = kobject_create_and_add("dvfs", NULL);
1379 for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
1380 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
1382 DVFS_ERR("create index %d error\n", i);
1387 if (temp_limit_enable) {
1388 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
1389 if (!clk_cpu_dvfs_node){
1393 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1394 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
1395 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
1401 late_initcall(dvfs_init);