/*
- * Copyright (C) 2013 ROCKCHIP, Inc.
+ * Rockchip CPUFreq Driver
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "cpufreq: " fmt
+
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
-#include <linux/kernel_stat.h>
#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/reboot.h>
-#include <linux/suspend.h>
-#include <linux/tick.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/regulator/consumer.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/string.h>
-#include <linux/rockchip/cpu.h>
-#include <linux/rockchip/dvfs.h>
-#include <asm/smp_plat.h>
-#include <asm/cpu.h>
-#include <asm/unistd.h>
-#include <asm/uaccess.h>
-#include "../../../drivers/clk/rockchip/clk-pd.h"
-
-extern void dvfs_disable_temp_limit(void);
-
-#define VERSION "1.0"
-
-#ifdef DEBUG
-#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
-#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
-#else
-#define FREQ_DBG(fmt, args...) do {} while(0)
-#define FREQ_LOG(fmt, args...) do {} while(0)
-#endif
-#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
-
-/* Frequency table index must be sequential starting at 0 */
-static struct cpufreq_frequency_table default_freq_table[] = {
- {.frequency = 312 * 1000, .index = 875 * 1000},
- {.frequency = 504 * 1000, .index = 925 * 1000},
- {.frequency = 816 * 1000, .index = 975 * 1000},
- {.frequency = 1008 * 1000, .index = 1075 * 1000},
- {.frequency = 1200 * 1000, .index = 1150 * 1000},
- {.frequency = 1416 * 1000, .index = 1250 * 1000},
- {.frequency = 1608 * 1000, .index = 1350 * 1000},
- {.frequency = CPUFREQ_TABLE_END},
+#include <linux/slab.h>
+
+#include "../clk/rockchip/clk.h"
+
+/*
+ * MAX_PROP_NAME_LEN must hold the longest OPP prop-name suffix built in
+ * rockchip_cpufreq_set_opp_info(): "S%d-L%d" (e.g. "S15-L15") plus the
+ * terminating NUL.  The previous value of 3 made snprintf() silently
+ * truncate "S%d-L%d" down to "S%d", selecting the wrong OPP set.
+ */
+#define MAX_PROP_NAME_LEN	16
+/* Sentinel stored in the terminating leakage_table entry's .value */
+#define LEAKAGE_TABLE_END	~1
+/* An unprogrammed efuse byte reads back as 0xff */
+#define INVALID_VALUE		0xff
+
+/* Frequency the CPUs are pinned to while rebooting */
+#define REBOOT_FREQ		816000 /* kHz */
+
+/* One row of a DT "leakage-*-sel" table: leakage in [min, max] -> value */
+struct leakage_table {
+	int min;
+	int max;
+	int value;
+};
-static struct cpufreq_frequency_table *freq_table = default_freq_table;
-/*********************************************************/
-/* additional symantics for "relation" in cpufreq with pm */
-#define DISABLE_FURTHER_CPUFREQ         0x10
-#define ENABLE_FURTHER_CPUFREQ          0x20
-#define MASK_FURTHER_CPUFREQ            0x30
-/* With 0x00(NOCHANGE), it depends on the previous "further" status */
-#define CPUFREQ_PRIVATE                 0x100
-static unsigned int no_cpufreq_access = 0;
-static unsigned int suspend_freq = 816 * 1000;
-static unsigned int suspend_volt = 1000000; // 1V
-static unsigned int low_battery_freq = 600 * 1000;
-static unsigned int low_battery_capacity = 5; // 5%
-static bool is_booting = true;
-static DEFINE_MUTEX(cpufreq_mutex);
-static bool gpu_is_mali400;
-struct dvfs_node *clk_cpu_dvfs_node = NULL;
-struct dvfs_node *clk_gpu_dvfs_node = NULL;
-struct dvfs_node *aclk_vio1_dvfs_node = NULL;
-struct dvfs_node *clk_ddr_dvfs_node = NULL;
-/*******************************************************/
-static unsigned int cpufreq_get_rate(unsigned int cpu)
-{
-	if (clk_cpu_dvfs_node)
-		return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
-	return 0;
-}
+/* Per-cluster state; one entry on cluster_info_list per CPU cluster */
+struct cluster_info {
+	struct list_head list_head;	/* link in cluster_info_list */
+	cpumask_t cpus;			/* CPUs sharing this cluster's OPP table */
+	int leakage;			/* leakage value read from efuse */
+	int lkg_volt_sel;		/* voltage selector from leakage, -1 if unknown */
+	int soc_version;		/* SoC version from efuse, -1 if unknown */
+	bool set_opp;			/* re-apply OPP info when cluster comes back online */
+	unsigned int reboot_freq;	/* frequency (kHz) to pin while rebooting */
+	bool rebooting;			/* reboot notifier has fired for this cluster */
+};
-static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
-{
-	char c = 0;
-	if (policy && policy->governor)
-		c = policy->governor->name[0];
-	return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
-}
+static LIST_HEAD(cluster_info_list);
-static unsigned int get_freq_from_table(unsigned int max_freq)
+/*
+ * Find the cluster_info whose CPU mask contains @cpu.
+ * Returns NULL if the CPU belongs to no registered cluster.
+ */
+static struct cluster_info *rockchip_cluster_info_lookup(int cpu)
{
-	unsigned int i;
-	unsigned int target_freq = 0;
-	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		unsigned int freq = freq_table[i].frequency;
-		if (freq <= max_freq && target_freq < freq) {
-			target_freq = freq;
-		}
+	struct cluster_info *cluster;
+
+	list_for_each_entry(cluster, &cluster_info_list, list_head) {
+		if (cpumask_test_cpu(cpu, &cluster->cpus))
+			return cluster;
	}
-	if (!target_freq)
-		target_freq = max_freq;
-	return target_freq;
+
+	return NULL;
}
-static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
+/*
+ * Read one byte from the nvmem cell named @porp_name under @np into
+ * *value.  Returns 0 on success or a negative errno.  A raw value of
+ * 0xff (INVALID_VALUE) means the efuse was never programmed.
+ */
+static int rockchip_efuse_get_one_byte(struct device_node *np, char *porp_name,
+				       int *value)
{
-	static unsigned int min_rate=0, max_rate=-1;
-	struct cpufreq_policy *policy = data;
+	struct nvmem_cell *cell;
+	unsigned char *buf;
+	size_t len;
-	if (val != CPUFREQ_ADJUST)
-		return 0;
+	cell = of_nvmem_cell_get(np, porp_name);
+	if (IS_ERR(cell))
+		return PTR_ERR(cell);
-	if (cpufreq_is_ondemand(policy)) {
-		FREQ_DBG("queue work\n");
-		dvfs_clk_enable_limit(clk_cpu_dvfs_node, min_rate, max_rate);
-	} else {
-		FREQ_DBG("cancel work\n");
-		dvfs_clk_get_limit(clk_cpu_dvfs_node, &min_rate, &max_rate);
-	}
+	buf = (unsigned char *)nvmem_cell_read(cell, &len);
-	return 0;
-}
+	nvmem_cell_put(cell);
-static struct notifier_block notifier_policy_block = {
-	.notifier_call = cpufreq_notifier_policy
-};
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
-static int cpufreq_verify(struct cpufreq_policy *policy)
-{
-	if (!freq_table)
-		return -EINVAL;
+	/*
+	 * nvmem_cell_read() returns a kmalloc'd buffer that must be freed
+	 * on every exit path; the error return used to leak it.
+	 */
+	if (buf[0] == INVALID_VALUE) {
+		kfree(buf);
+		return -EINVAL;
+	}
-	return cpufreq_frequency_table_verify(policy, freq_table);
-}
-static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
-{
-	int ret;
-	struct cpufreq_freqs freqs;
-	struct cpufreq_policy *policy;
-
-	freqs.new = rate / 1000;
-	freqs.old = clk_get_rate(clk) / 1000;
-
-	for_each_online_cpu(freqs.cpu) {
-		policy = cpufreq_cpu_get(freqs.cpu);
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-		cpufreq_cpu_put(policy);
-	}
-
-	FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
-
-	ret = clk_set_rate(clk, rate);
-
-	freqs.new = clk_get_rate(clk) / 1000;
-	/* notifiers */
-	for_each_online_cpu(freqs.cpu) {
-		policy = cpufreq_cpu_get(freqs.cpu);
-		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-		cpufreq_cpu_put(policy);
-	}
+	*value = buf[0];
-	return ret;
-
+	kfree(buf);
+
+	return 0;
}
-static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
+/*
+ * Read the rk3399 SoC version from the "soc_version" efuse cell into
+ * *soc_version.  The version is stored in the high nibble of the byte.
+ * Returns 0 (leaving *soc_version untouched) when the cell is absent.
+ */
+static int rk3399_get_soc_version(struct device_node *np, int *soc_version)
{
-	unsigned int i;
-	gpu_is_mali400 = cpu_is_rk3188();
-
-	clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
-	if (clk_gpu_dvfs_node){
-		clk_enable_dvfs(clk_gpu_dvfs_node);
-		if (gpu_is_mali400)
-			dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);
-	}
+	int ret, version;
-	clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
-	if (clk_ddr_dvfs_node){
-		clk_enable_dvfs(clk_ddr_dvfs_node);
-	}
+	/* No "soc_version" nvmem cell means the DT opted out: not an error. */
+	if (of_property_match_string(np, "nvmem-cell-names",
+				     "soc_version") < 0)
+		return 0;
-	clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
-	if (!clk_cpu_dvfs_node){
-		return -EINVAL;
-	}
-	dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
-	freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
-	if (freq_table == NULL) {
-		freq_table = default_freq_table;
-	} else {
-		int v = INT_MAX;
-		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-			if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
-				suspend_freq = freq_table[i].frequency;
-				v = freq_table[i].index;
-			}
-		}
-	}
-	low_battery_freq = get_freq_from_table(low_battery_freq);
-	clk_enable_dvfs(clk_cpu_dvfs_node);
+	ret = rockchip_efuse_get_one_byte(np, "soc_version",
+					  &version);
+	if (ret)
+		return ret;
-	cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
+	/* The version field occupies the high nibble of the efuse byte. */
+	*soc_version = (version & 0xf0) >> 4;
-	printk("cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
	return 0;
}
-static int cpufreq_init(struct cpufreq_policy *policy)
+/* Root-node compatibles; .data is the SoC-version readout hook, if any */
+static const struct of_device_id rockchip_cpufreq_of_match[] = {
+	{
+		.compatible = "rockchip,rk3399",
+		.data = (void *)&rk3399_get_soc_version,
+	},
+	{},
+};
+
+/*
+ * Parse the u32 <min max value> triplets of DT property @porp_name into a
+ * heap-allocated array of struct leakage_table terminated by an entry
+ * whose .value is LEAKAGE_TABLE_END.  On success *table owns the array
+ * (caller must kfree() it).  Returns 0 or a negative errno.
+ */
+static int rockchip_get_leakage_table(struct device_node *np, char *porp_name,
+				      struct leakage_table **table)
{
-	static int cpu0_err;
-
-	if (policy->cpu == 0) {
-		cpu0_err = cpufreq_init_cpu0(policy);
-	}
-
-	if (cpu0_err)
-		return cpu0_err;
-
-	//set freq min max
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	//sys nod
-	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-
-	policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
-
-	policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; // make ondemand default sampling_rate to 40000
-
-	/*
-	 * On SMP configuartion, both processors share the voltage
-	 * and clock. So both CPUs needs to be scaled together and hence
-	 * needs software co-ordination. Use cpufreq affected_cpus
-	 * interface to handle this scenario. Additional is_smp() check
-	 * is to keep SMP_ON_UP build working.
-	 */
-	if (is_smp())
-		cpumask_setall(policy->cpus);
+	struct leakage_table *lkg_table;
+	const struct property *prop;
+	int count, i;
-	return 0;
+	prop = of_find_property(np, porp_name, NULL);
+	if (!prop)
+		return -EINVAL;
-}
+	if (!prop->value)
+		return -ENODATA;
-static int cpufreq_exit(struct cpufreq_policy *policy)
-{
-	if (policy->cpu != 0)
-		return 0;
+	count = of_property_count_u32_elems(np, porp_name);
+	if (count < 0)
+		return -EINVAL;
+
+	/* Entries must be whole <min max value> triplets. */
+	if (count % 3)
+		return -EINVAL;
-	cpufreq_frequency_table_cpuinfo(policy, freq_table);
-	clk_put_dvfs_node(clk_cpu_dvfs_node);
-	cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
+	/* kcalloc() zeroes and guards the count * size multiplication. */
+	lkg_table = kcalloc(count / 3 + 1, sizeof(*lkg_table), GFP_KERNEL);
+	if (!lkg_table)
+		return -ENOMEM;
+
+	for (i = 0; i < count / 3; i++) {
+		of_property_read_u32_index(np, porp_name, 3 * i,
+					   &lkg_table[i].min);
+		of_property_read_u32_index(np, porp_name, 3 * i + 1,
+					   &lkg_table[i].max);
+		of_property_read_u32_index(np, porp_name, 3 * i + 2,
+					   &lkg_table[i].value);
+	}
+	/* Sentinel entry terminating the table. */
+	lkg_table[i].min = 0;
+	lkg_table[i].max = 0;
+	lkg_table[i].value = LEAKAGE_TABLE_END;
+
+	*table = lkg_table;
	return 0;
}
-static struct freq_attr *cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,
-};
+/*
+ * Look up @leakage in the DT table property @name and store the matching
+ * selector in *value.  Returns 0 (leaving *value untouched) when the
+ * "cpu_leakage" nvmem cell or the table property is absent; -EINVAL on a
+ * malformed table or when @leakage is below every entry's .min.
+ */
+static int rockchip_get_leakage_sel(struct device_node *np, char *name,
+				    int leakage, int *value)
+{
+	struct leakage_table *table;
+	struct property *prop;
+	int i, j = -1, ret;
-#ifdef CONFIG_CHARGER_DISPLAY
-extern int rk_get_system_battery_capacity(void);
-#else
-static int rk_get_system_battery_capacity(void) { return 100; }
-#endif
+	if (of_property_match_string(np, "nvmem-cell-names", "cpu_leakage") < 0)
+		return 0;
-static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
-{
-	bool is_ondemand = cpufreq_is_ondemand(policy);
-
-	if (!is_ondemand)
-		return target_freq;
-
-	if (is_booting) {
-		s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
-		if (boottime_ms > 60 * MSEC_PER_SEC) {
-			is_booting = false;
-		} else if (target_freq > low_battery_freq &&
-			   rk_get_system_battery_capacity() <= low_battery_capacity) {
-			target_freq = low_battery_freq;
-		}
+	prop = of_find_property(np, name, NULL);
+	if (!prop)
+		return 0;
+
+	ret = rockchip_get_leakage_table(np, name, &table);
+	if (ret)
+		return -EINVAL;
+
+	/*
+	 * Pick the last row whose lower bound the leakage reaches.
+	 * NOTE(review): .max is never consulted here - confirm the upper
+	 * bound is intentionally ignored.
+	 */
+	for (i = 0; table[i].value != LEAKAGE_TABLE_END; i++) {
+		if (leakage >= table[i].min)
+			j = i;
	}
+	if (j != -1)
+		*value = table[j].value;
+	else
+		ret = -EINVAL;
+
+	kfree(table);
-	return target_freq;
+	return ret;
}
-static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
+/*
+ * Parse the cluster data for @cpu into @cluster: shared-cpus mask,
+ * optional SoC version and leakage selectors, and the reboot frequency.
+ * Returns 0 on success or a negative errno (-ENOENT from
+ * dev_pm_opp_of_get_sharing_cpus() means "no operating-points-v2").
+ */
+static int rockchip_cpufreq_of_parse_dt(int cpu, struct cluster_info *cluster)
{
-	unsigned int i, new_freq = target_freq, new_rate, cur_rate;
-	int ret = 0;
-	bool is_private;
+	int (*get_soc_version)(struct device_node *np, int *soc_version);
+	const struct of_device_id *match;
+	struct device_node *node, *np;
+	struct clk *clk;
+	struct device *dev;
+	int ret, lkg_scaling_sel = -1;
+
+	dev = get_cpu_device(cpu);
+	if (!dev)
+		return -ENODEV;
+
+	ret = dev_pm_opp_of_get_sharing_cpus(dev, &cluster->cpus);
+	if (ret)
+		return ret;
-	if (!freq_table) {
-		FREQ_ERR("no freq table!\n");
+	np = of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+	if (!np) {
+		dev_info(dev, "OPP-v2 not supported\n");
		return -EINVAL;
	}
-	mutex_lock(&cpufreq_mutex);
+	cluster->soc_version = -1;
+	node = of_find_node_by_path("/");
+	match = of_match_node(rockchip_cpufreq_of_match, node);
+	of_node_put(node);	/* of_find_node_by_path() takes a reference */
+	if (match && match->data) {
+		get_soc_version = match->data;
+		ret = get_soc_version(np, &cluster->soc_version);
+		if (ret) {
+			dev_err(dev, "Failed to get chip_version\n");
+			goto out_np;
+		}
+	}
-	is_private = relation & CPUFREQ_PRIVATE;
-	relation &= ~CPUFREQ_PRIVATE;
+	if (of_property_read_u32(np, "reboot-freq", &cluster->reboot_freq))
+		cluster->reboot_freq = REBOOT_FREQ;
-	if ((relation & ENABLE_FURTHER_CPUFREQ) && no_cpufreq_access)
-		no_cpufreq_access--;
-	if (no_cpufreq_access) {
-		FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
-		ret = -EINVAL;
-		goto out;
+	/* Leakage is best-effort: a read failure only disables the selectors. */
+	ret = rockchip_efuse_get_one_byte(np, "cpu_leakage", &cluster->leakage);
+	if (ret)
+		dev_err(dev, "Failed to get cpu_leakage\n");
+	else
+		dev_info(dev, "leakage=%d\n", cluster->leakage);
+
+	cluster->lkg_volt_sel = -1;
+	ret = rockchip_get_leakage_sel(np, "leakage-voltage-sel",
+				       cluster->leakage,
+				       &cluster->lkg_volt_sel);
+	if (ret) {
+		dev_err(dev, "Failed to get voltage-sel\n");
+		goto out_np;
	}
-	if (relation & DISABLE_FURTHER_CPUFREQ)
-		no_cpufreq_access++;
-	relation &= ~MASK_FURTHER_CPUFREQ;
-	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
+	ret = rockchip_get_leakage_sel(np, "leakage-scaling-sel",
+				       cluster->leakage,
+				       &lkg_scaling_sel);
	if (ret) {
-		FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
-		goto out;
+		dev_err(dev, "Failed to get scaling-sel\n");
+		goto out_np;
+	} else if (lkg_scaling_sel >= 0) {
+		clk = of_clk_get_by_name(np, NULL);
+		if (IS_ERR(clk)) {
+			dev_err(dev, "Failed to get opp clk");
+			ret = PTR_ERR(clk);
+			goto out_np;
+		}
+		ret = rockchip_pll_clk_adaptive_scaling(clk, lkg_scaling_sel);
+		clk_put(clk);	/* this reference used to be leaked */
+		if (ret) {
+			dev_err(dev, "Failed to adaptive scaling\n");
+			goto out_np;
+		}
	}
-	new_freq = freq_table[i].frequency;
-	if (!no_cpufreq_access)
-		new_freq = cpufreq_scale_limit(new_freq, policy, is_private);
-
-	new_rate = new_freq * 1000;
-	cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
-	FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
-	if (new_rate == cur_rate)
-		goto out;
-	ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);
-
-out:
-	FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
-	mutex_unlock(&cpufreq_mutex);
-	return ret;
+	ret = 0;
+out_np:
+	of_node_put(np);	/* drop the of_parse_phandle() reference */
+	return ret;
}
-static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
+/*
+ * Build the OPP prop-name suffix ("S<ver>", "L<sel>" or "S<ver>-L<sel>")
+ * for @cluster and apply it, plus the supported-hw version mask, to
+ * @cpu's OPP table.  No-op (returns 0) when neither selector is known.
+ * NOTE(review): the "S%d-L%d" form needs MAX_PROP_NAME_LEN >= 6 or
+ * snprintf() silently truncates it - confirm the define is sized for it.
+ */
+static int rockchip_cpufreq_set_opp_info(int cpu, struct cluster_info *cluster)
{
-	int ret = NOTIFY_DONE;
-	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+	struct device *dev;
+	char name[MAX_PROP_NAME_LEN];
+	int ret, version;
+
+	dev = get_cpu_device(cpu);
+	if (!dev)
+		return -ENODEV;
+
+	if (cluster->soc_version != -1 && cluster->lkg_volt_sel != -1)
+		snprintf(name, MAX_PROP_NAME_LEN, "S%d-L%d",
+			 cluster->soc_version,
+			 cluster->lkg_volt_sel);
+	else if (cluster->soc_version != -1 && cluster->lkg_volt_sel == -1)
+		snprintf(name, MAX_PROP_NAME_LEN, "S%d", cluster->soc_version);
+	else if (cluster->soc_version == -1 && cluster->lkg_volt_sel != -1)
+		snprintf(name, MAX_PROP_NAME_LEN, "L%d", cluster->lkg_volt_sel);
+	else
+		return 0;
-	if (!policy)
+	ret = dev_pm_opp_set_prop_name(dev, name);
+	if (ret) {
+		dev_err(dev, "Failed to set prop name\n");
		return ret;
+	}
+
+	if (cluster->soc_version != -1) {
+		version = BIT(cluster->soc_version);
+		ret = dev_pm_opp_set_supported_hw(dev, &version, 1);
+		if (ret) {
+			dev_err(dev, "Failed to set supported hardware\n");
+			return ret;
+		}
+	}
-	if (!cpufreq_is_ondemand(policy))
-		goto out;
+	return 0;
+}
-	switch (event) {
-	case PM_SUSPEND_PREPARE:
-		policy->cur++;
-		ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
-		if (ret < 0) {
-			ret = NOTIFY_BAD;
-			goto out;
+/*
+ * CPU hotplug callback.  When the last CPU of a cluster goes down the
+ * cluster is flagged (set_opp) so its OPP prop name / supported-hw info
+ * is re-applied on the next CPU_ONLINE of that cluster.
+ */
+static int rockchip_hotcpu_notifier(struct notifier_block *nb,
+				    unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct cluster_info *cluster;
+	cpumask_t cpus;
+	int number, ret;
+
+	cluster = rockchip_cluster_info_lookup(cpu);
+	if (!cluster)
+		return NOTIFY_OK;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		if (cluster->set_opp) {
+			ret = rockchip_cpufreq_set_opp_info(cpu, cluster);
+			if (ret)
+				pr_err("Failed to set cpu%d opp_info\n", cpu);
+			cluster->set_opp = false;
		}
-		ret = NOTIFY_OK;
		break;
-	case PM_POST_RESTORE:
-	case PM_POST_SUSPEND:
-		//if (target_freq == policy->cur) then cpufreq_driver_target
-		//will return, and our target will not be called, it casue
-		//ENABLE_FURTHER_CPUFREQ flag invalid, avoid that.
-		policy->cur++;
-		cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
-		ret = NOTIFY_OK;
+
+	case CPU_POST_DEAD:
+		/* Flag the cluster when no CPU of it remains online. */
+		cpumask_and(&cpus, &cluster->cpus, cpu_online_mask);
+		number = cpumask_weight(&cpus);
+		if (!number)
+			cluster->set_opp = true;
		break;
	}
-out:
-	cpufreq_cpu_put(policy);
-	return ret;
+
+	return NOTIFY_OK;
}
-static struct notifier_block cpufreq_pm_notifier = {
-	.notifier_call = cpufreq_pm_notifier_event,
+static struct notifier_block rockchip_hotcpu_nb = {
+	.notifier_call = rockchip_hotcpu_notifier,
};
-static int cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
+/*
+ * Reboot callback: mark every cluster as rebooting and kick a policy
+ * update so rockchip_cpufreq_notifier() clamps the frequency.
+ */
+static int rockchip_reboot_notifier(struct notifier_block *nb,
+				    unsigned long action, void *ptr)
{
-	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+	int cpu;
+	struct cluster_info *cluster;
-	if (policy) {
-		is_booting = false;
-		policy->cur++;
-		cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
-		cpufreq_cpu_put(policy);
+	list_for_each_entry(cluster, &cluster_info_list, list_head) {
+		/* NOTE(review): cpu may be >= nr_cpu_ids if the whole
+		 * cluster is offline - confirm cpufreq_update_policy()
+		 * tolerates that.
+		 */
+		cpu = cpumask_first_and(&cluster->cpus, cpu_online_mask);
+		cluster->rebooting = true;
+		cpufreq_update_policy(cpu);
	}
	return NOTIFY_OK;
}
-int rockchip_cpufreq_reboot_limit_freq(void)
-{
-	dvfs_disable_temp_limit();
-	dvfs_clk_enable_limit(clk_cpu_dvfs_node, 1000*suspend_freq, 1000*suspend_freq);
-	printk("cpufreq: reboot set core rate=%lu, volt=%d\n", dvfs_clk_get_rate(clk_cpu_dvfs_node),
-		regulator_get_voltage(clk_cpu_dvfs_node->vd->regulator));
-
-	return 0;
-}
-
-static struct notifier_block cpufreq_reboot_notifier = {
-	.notifier_call = cpufreq_reboot_notifier_event,
+static struct notifier_block rockchip_reboot_nb = {
+	.notifier_call = rockchip_reboot_notifier,
};
-static int clk_pd_vio_notifier_call(struct notifier_block *nb, unsigned long event, void *ptr)
+/*
+ * CPUFREQ_ADJUST policy callback: while a cluster is rebooting, clamp
+ * the policy of its CPUs to at most reboot_freq (and force min == max
+ * so the frequency is pinned).
+ */
+static int rockchip_cpufreq_notifier(struct notifier_block *nb,
+				     unsigned long event, void *data)
{
-	switch (event) {
-	case RK_CLK_PD_PRE_ENABLE:
-		if (aclk_vio1_dvfs_node)
-			clk_enable_dvfs(aclk_vio1_dvfs_node);
-		break;
-	case RK_CLK_PD_POST_DISABLE:
-		if (aclk_vio1_dvfs_node)
-			clk_disable_dvfs(aclk_vio1_dvfs_node);
-		break;
+	struct cpufreq_policy *policy = data;
+	struct cluster_info *cluster;
+
+	if (event != CPUFREQ_ADJUST)
+		return NOTIFY_OK;
+
+	list_for_each_entry(cluster, &cluster_info_list, list_head) {
+		if (cluster->rebooting &&
+		    cpumask_test_cpu(policy->cpu, &cluster->cpus)) {
+			if (cluster->reboot_freq < policy->max)
+				policy->max = cluster->reboot_freq;
+			policy->min = policy->max;
+			pr_info("cpu%d limit freq=%d min=%d max=%d\n",
+				policy->cpu, cluster->reboot_freq,
+				policy->min, policy->max);
+		}
	}
+
	return NOTIFY_OK;
}
-static struct notifier_block clk_pd_vio_notifier = {
-	.notifier_call = clk_pd_vio_notifier_call,
+static struct notifier_block rockchip_cpufreq_nb = {
+	.notifier_call = rockchip_cpufreq_notifier,
};
-
-static struct cpufreq_driver cpufreq_driver = {
-	.flags = CPUFREQ_CONST_LOOPS,
-	.verify = cpufreq_verify,
-	.target = cpufreq_target,
-	.get = cpufreq_get_rate,
-	.init = cpufreq_init,
-	.exit = cpufreq_exit,
-	.name = "rockchip",
-	.attr = cpufreq_attr,
-};
-
+/*
+ * Build one cluster_info per cluster, apply its OPP info, register the
+ * hotplug/reboot/policy notifiers, then hand frequency scaling over to
+ * the generic cpufreq-dt platform driver.
+ */
+static int __init rockchip_cpufreq_driver_init(void)
{
-	struct clk *clk;
+	struct platform_device *pdev;
+	struct cluster_info *cluster;
+	int cpu, first_cpu, ret;
+
+	for_each_possible_cpu(cpu) {
+		cluster = rockchip_cluster_info_lookup(cpu);
+		if (cluster)
+			continue;
+
+		cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
+		if (!cluster)
+			return -ENOMEM;
+
+		ret = rockchip_cpufreq_of_parse_dt(cpu, cluster);
+		if (ret) {
+			/* Not yet on cluster_info_list: free it here. */
+			kfree(cluster);
+			if (ret != -ENOENT) {
+				pr_err("Failed to cpu%d parse_dt\n", cpu);
+				return ret;
+			}
+
+			/*
+			 * As the OPP document said, only one OPP binding
+			 * should be used per device.
+			 * And if there are multiple clusters on rockchip
+			 * platforms, we should use operating-points-v2.
+			 * So if don't support operating-points-v2, there must
+			 * be only one cluster, the list should be empty.
+			 */
+			if (!list_empty(&cluster_info_list))
+				return ret;
+			/*
+			 * If don't support operating-points-v2, there is no
+			 * need to register notifiers.
+			 */
+			goto next;
+		}
-	clk = clk_get(NULL, "pd_vio");
-	if (clk) {
-		rk_clk_pd_notifier_register(clk, &clk_pd_vio_notifier);
-		aclk_vio1_dvfs_node = clk_get_dvfs_node("aclk_vio1");
-		if (aclk_vio1_dvfs_node && __clk_is_enabled(clk)){
-			clk_enable_dvfs(aclk_vio1_dvfs_node);
+		first_cpu = cpumask_first_and(&cluster->cpus, cpu_online_mask);
+		ret = rockchip_cpufreq_set_opp_info(first_cpu, cluster);
+		if (ret) {
+			pr_err("Failed to set cpu%d opp_info\n", first_cpu);
+			kfree(cluster);
+			return ret;
		}
+
+		list_add(&cluster->list_head, &cluster_info_list);
	}
-	register_pm_notifier(&cpufreq_pm_notifier);
-	return cpufreq_register_driver(&cpufreq_driver);
+
+	/*
+	 * NOTE(review): clusters already on the list are not torn down on
+	 * a later iteration's failure - acceptable for a built-in init,
+	 * but confirm no unload path needs them freed.
+	 */
+	register_hotcpu_notifier(&rockchip_hotcpu_nb);
+	register_reboot_notifier(&rockchip_reboot_nb);
+	cpufreq_register_notifier(&rockchip_cpufreq_nb,
+				  CPUFREQ_POLICY_NOTIFIER);
+
+next:
+	pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+	return PTR_ERR_OR_ZERO(pdev);
}
+module_init(rockchip_cpufreq_driver_init);
-device_initcall(cpufreq_driver_init);
+MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip cpufreq driver");
+MODULE_LICENSE("GPL v2");