&clk_core_dvfs_table {
operating-points = <
/* KHz uV */
- 312000 1100000
- 504000 1100000
- 816000 1100000
- 1008000 1100000
+ 312000 850000
+ 504000 850000
+ 816000 950000
+ 1008000 1000000
+ 1200000 1050000
+ 1416000 1150000
+ 1608000 1250000
>;
};
&clk_gpu_dvfs_table {
operating-points = <
/* KHz uV */
- 200000 1200000
- 300000 1200000
- 400000 1200000
- 600000 1300000
+ 100000 850000
+ 200000 850000
+ 300000 900000
+ 400000 1000000
+ 600000 1250000
>;
};
816000 1100000
1008000 1100000
>;
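+ /* thermal throttling: tsadc channel to poll, and temp(C)/max-freq(KHz) trip pairs */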
+ temp-channel = <1>;
+ temp-limit = <
+ /* temp(C)  freq(KHz) */
+ 50 1608000
+ 70 1416000
+ 80 1200000
+ 100 1008000
+ >;
status = "okay";
};
};
300000 1200000
400000 1200000
>;
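+ /* gpu thermal throttling: tsadc channel and temp(C)/max-freq(KHz) trips */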
+ temp-channel = <2>;
+ temp-limit = <
+ /* temp(C)  freq(KHz) */
+ 50 600000
+ 70 500000
+ 80 400000
+ 100 300000
+ >;
status = "okay";
};
};
#include <linux/opp.h>
#include <linux/rockchip/dvfs.h>
+static struct workqueue_struct *dvfs_wq;
+
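+/* provided by the rockchip tsadc driver: returns the current temperature (C) of channel chn */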
+extern int rockchip_tsadc_get_temp(int chn);
+
#define MHz (1000 * 1000)
static LIST_HEAD(rk_dvfs_tree);
static DEFINE_MUTEX(rk_dvfs_mutex);
return ;
pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
- /*for (i = 0; (clk_dvfs_node->pds[i].pd != NULL); i++) {
- pd = clk_dvfs_node->pds[i].pd;
- // DVFS_DBG("%s dvfs(%s),pd(%s)\n",__func__,clk_dvfs_node->name,pd->name);
- pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
- }*/
}
static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
}
-int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned max_rate)
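+/*
+ * Poll work: every 100ms, re-target each clock that has a temp-limit table
+ * at its last requested rate, so dvfs_get_limit_rate() re-applies the
+ * thermal clamp with a fresh tsadc reading.
+ */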
+static void dvfs_temp_limit_work_func(struct work_struct *work)
+{
+ unsigned long delay = HZ / 10; /* 100ms */
+ struct vd_node *vd;
+ struct pd_node *pd;
+ struct dvfs_node *clk_dvfs_node;
+
+ mutex_lock(&rk_dvfs_mutex);
+ list_for_each_entry(vd, &rk_dvfs_tree, node) {
+ mutex_lock(&vd->mutex);
+ list_for_each_entry(pd, &vd->pd_list, node) {
+ list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
+ if (clk_dvfs_node->temp_limit_table)
+ clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
+ }
+ }
+ mutex_unlock(&vd->mutex);
+ }
+ mutex_unlock(&rk_dvfs_mutex);
+
+ queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
+}
+static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
+
+int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
{
u32 rate = 0, ret = 0;
- if (!clk_dvfs_node)
+ if (!clk_dvfs_node || (min_rate > max_rate))
return -EINVAL;
if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
/* To reset clk_dvfs_node->min_rate/max_rate */
dvfs_get_rate_range(clk_dvfs_node);
clk_dvfs_node->freq_limit_en = 1;
- clk_dvfs_node->min_rate = min_rate > clk_dvfs_node->min_rate ? min_rate : clk_dvfs_node->min_rate;
- clk_dvfs_node->max_rate = max_rate < clk_dvfs_node->max_rate ? max_rate : clk_dvfs_node->max_rate;
+
+ if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
+ clk_dvfs_node->min_rate = min_rate;
+ }
+
+ if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
+ clk_dvfs_node->max_rate = max_rate;
+ }
+
if (clk_dvfs_node->last_set_rate == 0)
rate = clk_get_rate(clk_dvfs_node->clk);
else
clk_dvfs_node->freq_limit_en = 1;
dvfs_table_round_volt(clk_dvfs_node);
clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
+ clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq * 1000;
DVFS_DBG("%s: %s get freq %u!\n",
__func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
}
#endif
-
-#if 0
if(clk_dvfs_node->vd->cur_volt < clk_dvfs_node->set_volt) {
int ret;
- mutex_lock(&rk_dvfs_mutex);
ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, clk_dvfs_node->set_volt, clk_dvfs_node->set_volt);
if (ret < 0) {
clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
- clk_dvfs_node->enable_dvfs = 0;
+ clk_dvfs_node->enable_count = 0;
DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
- mutex_unlock(&rk_dvfs_mutex);
- return -1;
+ mutex_unlock(&clk_dvfs_node->vd->mutex);
+ return -EAGAIN;
}
+ clk_dvfs_node->vd->cur_volt = clk_dvfs_node->set_volt;
clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
- mutex_unlock(&rk_dvfs_mutex);
}
-#endif
+
clk_dvfs_node->enable_count++;
} else {
DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
}
EXPORT_SYMBOL(clk_disable_dvfs);
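+/*
+ * Clamp a requested rate, first to [min_rate, max_rate] (when limiting is
+ * enabled), then to the frequency of the highest trip point the current
+ * tsadc temperature exceeds; temp_limit_rate starts at -1 (ULONG_MAX) so
+ * the thermal clamp is a no-op while no trip point is passed.
+ */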
+static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
+{
+ unsigned long limit_rate, temp_limit_rate;
+ int temp, i;
+
+ limit_rate = rate;
+ temp_limit_rate = -1;
+ if (clk_dvfs_node->freq_limit_en) {
+ /* dvfs table limit */
+ if (rate < clk_dvfs_node->min_rate) {
+ limit_rate = clk_dvfs_node->min_rate;
+ } else if (rate > clk_dvfs_node->max_rate) {
+ limit_rate = clk_dvfs_node->max_rate;
+ }
+
+ /* temp limit */
+ if (clk_dvfs_node->temp_limit_table) {
+ temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
+ for (i = 0; clk_dvfs_node->temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (temp > clk_dvfs_node->temp_limit_table[i].index) {
+ temp_limit_rate = clk_dvfs_node->temp_limit_table[i].frequency;
+ }
+ }
+
+ if (limit_rate > temp_limit_rate) {
+ DVFS_DBG("%s: temp(%d) limit clk(%s) rate %ld to %ld\n",
+ __func__, temp, clk_dvfs_node->name, limit_rate, temp_limit_rate);
+ limit_rate = temp_limit_rate;
+ }
+ }
+ }
+
+ DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
+
+ return limit_rate;
+}
+
static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
{
struct cpufreq_frequency_table clk_fv;
}
}
- /* Check limit rate */
- //if (clk_dvfs_node->freq_limit_en) {
- if (rate < clk_dvfs_node->min_rate) {
- rate = clk_dvfs_node->min_rate;
- } else if (rate > clk_dvfs_node->max_rate) {
- rate = clk_dvfs_node->max_rate;
- }
- //}
-
+ rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
new_rate = clk_round_rate(clk, rate);
old_rate = clk_get_rate(clk);
if (new_rate == old_rate)
/* find the clk corresponding voltage */
ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
if (ret) {
- DVFS_ERR("%s:dvfs clk(%s) rate %luhz is larger,not support\n",
+ DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
__func__, clk_dvfs_node->name, new_rate);
return ret;
}
if (ret)
goto out;
}
-
- clk_dvfs_node->last_set_rate = new_rate;
-
+
return 0;
fail_roll_back:
clk_dvfs_node->set_volt = clk_volt_store;
if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
mutex_lock(&clk_dvfs_node->vd->mutex);
ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
+ clk_dvfs_node->last_set_rate = rate;
mutex_unlock(&clk_dvfs_node->vd->mutex);
} else {
DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
return 0;
}
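+/*
+ * Parse the "temp-limit" DT property, pairs of <trip-temp(C) max-freq(KHz)>,
+ * into a cpufreq_frequency_table (.index = trip temperature, .frequency =
+ * rate scaled to Hz to match clk rates), terminated with CPUFREQ_TABLE_END.
+ */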
+static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node)
+{
+ struct cpufreq_frequency_table *temp_limit_table = NULL;
+ const struct property *prop;
+ const __be32 *val;
+ int nr, i;
+
+ prop = of_find_property(dev_node, "temp-limit", NULL);
+ if (!prop)
+ return NULL;
+ if (!prop->value)
+ return NULL;
+
+ nr = prop->length / sizeof(u32);
+ if (nr % 2) {
+ pr_err("%s: Invalid temp-limit list\n", __func__);
+ return NULL;
+ }
+
+ temp_limit_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (nr/2 + 1), GFP_KERNEL);
+ if (!temp_limit_table)
+ return NULL;
+
+ val = prop->value;
+
+ for (i = 0; i < nr / 2; i++) {
+ temp_limit_table[i].index = be32_to_cpup(val++);
+ temp_limit_table[i].frequency = be32_to_cpup(val++) * 1000;
+ }
+
+ temp_limit_table[i].index = 0;
+ temp_limit_table[i].frequency = CPUFREQ_TABLE_END;
+
+ return temp_limit_table;
+}
+
int of_dvfs_init(void)
{
struct vd_node *vd;
struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
struct dvfs_node *dvfs_node;
struct clk *clk;
+ const __be32 *val;
int ret;
DVFS_DBG("%s\n", __func__);
continue;
}
- /*vd->regulator = regulator_get(NULL, vd->regulator_name);
- if (IS_ERR(vd->regulator)){
- DVFS_ERR("%s vd(%s) get regulator(%s) failed!\n", __func__, vd->name, vd->regulator_name);
- kfree(vd);
- continue;
- }*/
-
vd->suspend_volt = 0;
vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
dvfs_node->name = clk_dev_node->name;
dvfs_node->pd = pd;
dvfs_node->vd = vd;
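+ /* optional thermal throttling config: tsadc channel and trip table */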
+ val = of_get_property(clk_dev_node, "temp-channel", NULL);
+ if (val) {
+ dvfs_node->temp_channel = be32_to_cpup(val);
+ dvfs_node->temp_limit_table = of_get_temp_limit_table(clk_dev_node);
+ }
+
dvfs_node->dev.of_node = clk_dev_node;
ret = of_init_opp_table(&dvfs_node->dev);
if (ret) {
" enable_dvfs = %s\n",
clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
- printk( "| | |- clk limit:[%u, %u]; last set rate = %u\n",
+ printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %u\n",
+ clk_dvfs_node->freq_limit_en ? "enable" : "disable",
clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
- clk_dvfs_node->last_set_rate);
+ clk_dvfs_node->last_set_rate / 1000);
for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
printk( "| | | |- freq = %d, volt = %d\n",
}
}
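+ /* single-threaded (max_active = 1), freezable workqueue driving the 100ms temp-limit poll */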
+ dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
+ queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0);
+
return ret;
}
-subsys_initcall(dvfs_init);
+late_initcall(dvfs_init);
static unsigned int low_battery_freq = 600 * 1000;
static unsigned int low_battery_capacity = 5; // 5%
static bool is_booting = true;
-static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
struct dvfs_node *clk_cpu_dvfs_node = NULL;
return target_freq;
}
-/**********************thermal limit**************************/
-//#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
-
-#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
-static unsigned int temp_limit_freq = -1;
-module_param(temp_limit_freq, uint, 0444);
-
-static struct cpufreq_frequency_table temp_limits[4][4] = {
- { // 1 CPU busy
- {.frequency = -1, .index = 50},
- {.frequency = -1, .index = 55},
- {.frequency = -1, .index = 60},
- {.frequency = 1608 * 1000, .index = 75},
- }, { // 2 CPUs busy
- {.frequency = 1800 * 1000, .index = 50},
- {.frequency = 1608 * 1000, .index = 55},
- {.frequency = 1416 * 1000, .index = 60},
- {.frequency = 1200 * 1000, .index = 75},
- }, { // 3 CPUs busy
- {.frequency = 1608 * 1000, .index = 50},
- {.frequency = 1416 * 1000, .index = 55},
- {.frequency = 1200 * 1000, .index = 60},
- {.frequency = 1008 * 1000, .index = 75},
- }, { // 4 CPUs busy
- {.frequency = 1416 * 1000, .index = 50},
- {.frequency = 1200 * 1000, .index = 55},
- {.frequency = 1008 * 1000, .index = 60},
- {.frequency = 816 * 1000, .index = 75},
- }
-};
-
-static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
- {.frequency = 1008 * 1000, .index = 100},
-};
-
-static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
- {.frequency = 1008 * 1000, .index = 0},
-};
-
-static int get_temp(void)
-{
- return 60;
-}
-
-static char sys_state;
-static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
-{
- char state;
-
- if (count < 1)
- return count;
- if (copy_from_user(&state, buffer, 1)) {
- return -EFAULT;
- }
-
- sys_state = state;
- return count;
-}
-
-static const struct file_operations sys_state_fops = {
- .owner = THIS_MODULE,
- .write = sys_state_write,
-};
-
-static struct miscdevice sys_state_dev = {
- .fops = &sys_state_fops,
- .name = "sys_state",
- .minor = MISC_DYNAMIC_MINOR,
-};
-
-static void cpufreq_temp_limit_work_func(struct work_struct *work)
-{
- static bool in_perf = false;
- struct cpufreq_policy *policy;
- int temp, i;
- unsigned int new_freq = -1;
- unsigned long delay = HZ / 10; // 100ms
- unsigned int nr_cpus = num_online_cpus();
- const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
- size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);
-
- temp = get_temp();
-
- if (sys_state == '1') {
- in_perf = true;
- if (gpu_is_mali400) {
- unsigned int gpu_irqs[2];
- gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
- msleep(40);
- gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
- delay = 0;
- if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
- limits_table = temp_limits_cpu_perf;
- limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
- } else {
- limits_table = temp_limits_gpu_perf;
- limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
- }
- } else {
- delay = HZ; // 1s
- limits_table = temp_limits_cpu_perf;
- limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
- }
- } else if (in_perf) {
- in_perf = false;
- } else {
- static u64 last_time_in_idle = 0;
- static u64 last_time_in_idle_timestamp = 0;
- u64 time_in_idle = 0, now;
- u32 delta_idle;
- u32 delta_time;
- unsigned cpu;
-
- for_each_online_cpu(cpu) {
- time_in_idle += get_cpu_idle_time_us(cpu, &now);
- }
- delta_time = now - last_time_in_idle_timestamp;
- delta_idle = time_in_idle - last_time_in_idle;
- last_time_in_idle = time_in_idle;
- last_time_in_idle_timestamp = now;
- delta_idle += delta_time >> 4; // +6.25%
- if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
- limits_table = temp_limits[0];
- else if (delta_idle > (nr_cpus - 2) * delta_time)
- limits_table = temp_limits[1];
- else if (delta_idle > (nr_cpus - 3) * delta_time)
- limits_table = temp_limits[2];
- FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0]));
- }
-
- for (i = 0; i < limits_size; i++) {
- if (temp >= limits_table[i].index) {
- new_freq = limits_table[i].frequency;
- }
- }
-
- if (temp_limit_freq != new_freq) {
- unsigned int cur_freq;
- temp_limit_freq = new_freq;
- cur_freq = cpufreq_get_rate(0);
- FREQ_DBG("temp limit %7d KHz cur %7d KHz\n", temp_limit_freq, cur_freq);
- if (cur_freq > temp_limit_freq) {
- policy = cpufreq_cpu_get(0);
- cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
- cpufreq_cpu_put(policy);
- }
- }
-
- queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);
-}
-
-static DECLARE_DELAYED_WORK(cpufreq_temp_limit_work, cpufreq_temp_limit_work_func);
-
static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
{
struct cpufreq_policy *policy = data;
- if (val != CPUFREQ_NOTIFY)
+ if (val != CPUFREQ_ADJUST)
return 0;
if (cpufreq_is_ondemand(policy)) {
FREQ_DBG("queue work\n");
- queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
+ dvfs_clk_enable_limit(clk_cpu_dvfs_node, 0, ~0);
} else {
FREQ_DBG("cancel work\n");
- cancel_delayed_work_sync(&cpufreq_temp_limit_work);
+ dvfs_clk_disable_limit(clk_cpu_dvfs_node);
}
return 0;
.notifier_call = cpufreq_notifier_policy
};
-static void cpufreq_temp_limit_init(struct cpufreq_policy *policy)
-{
- unsigned int i;
- struct cpufreq_frequency_table *table;
-
- table = temp_limits[0];
- for (i = 0; i < sizeof(temp_limits) / sizeof(struct cpufreq_frequency_table); i++) {
- table[i].frequency = get_freq_from_table(table[i].frequency);
- }
- table = temp_limits_cpu_perf;
- for (i = 0; i < sizeof(temp_limits_cpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
- table[i].frequency = get_freq_from_table(table[i].frequency);
- }
- table = temp_limits_gpu_perf;
- for (i = 0; i < sizeof(temp_limits_gpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
- table[i].frequency = get_freq_from_table(table[i].frequency);
- }
- misc_register(&sys_state_dev);
- if (cpufreq_is_ondemand(policy)) {
- queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0*HZ);
- }
- cpufreq_register_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
-}
-
-static void cpufreq_temp_limit_exit(void)
-{
- cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
- if (freq_wq)
- cancel_delayed_work(&cpufreq_temp_limit_work);
-}
-#else
-static inline void cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
-static inline void cpufreq_temp_limit_exit(void) {}
-#endif
-
static int cpufreq_verify(struct cpufreq_policy *policy)
{
if (!freq_table)
clk_vepu_dvfs_node = clk_get_dvfs_node("clk_vepu");
if (clk_vepu_dvfs_node){
clk_enable_dvfs(clk_vepu_dvfs_node);
- dvfs_clk_set_rate(clk_vepu_dvfs_node, 198000000);
- dvfs_clk_set_rate(clk_vepu_dvfs_node, 297000000);
}
clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
low_battery_freq = get_freq_from_table(low_battery_freq);
clk_enable_dvfs(clk_cpu_dvfs_node);
- freq_wq = alloc_workqueue("cpufreq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
- cpufreq_temp_limit_init(policy);
+ cpufreq_register_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
printk("cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
return 0;
cpufreq_frequency_table_cpuinfo(policy, freq_table);
clk_put_dvfs_node(clk_cpu_dvfs_node);
- cpufreq_temp_limit_exit();
- if (freq_wq) {
- flush_workqueue(freq_wq);
- destroy_workqueue(freq_wq);
- freq_wq = NULL;
- }
+ cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
return 0;
}
}
}
-#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
- {
- static unsigned int ondemand_target = 816 * 1000;
- if (is_private)
- target_freq = ondemand_target;
- else
- ondemand_target = target_freq;
- }
-
- /*
- * If the new frequency is more than the thermal max allowed
- * frequency, go ahead and scale the mpu device to proper frequency.
- */
- target_freq = min(target_freq, temp_limit_freq);
-#endif
-
return target_freq;
}
unsigned int min_rate; //limit min frequency
unsigned int max_rate; //limit max frequency
unsigned int last_set_rate;
+ unsigned int temp_channel; //tsadc channel for temp limit
struct clk *clk;
struct pd_node *pd;
struct vd_node *vd;
struct list_head node;
struct notifier_block *dvfs_nb;
struct cpufreq_frequency_table *dvfs_table;
- struct cpufreq_frequency_table *condition_freq_table;
+ struct cpufreq_frequency_table *temp_limit_table;
clk_set_rate_callback clk_dvfs_target;
};