+
+/*
+ * Clock notifier for aclk_vepu.
+ *
+ * Tracks whether the VPU clock is (about to be) enabled and, on the
+ * enable path, immediately re-evaluates the current cpufreq target so
+ * that the VPU-related frequency limit is applied right away instead of
+ * waiting for the next governor evaluation.  Returns NOTIFY_OK for all
+ * events, including ones we do not care about.
+ */
+static int rk29_cpufreq_aclk_vepu_notifier_event(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	struct cpufreq_policy *policy;
+
+	if (event == CLK_PRE_ENABLE)
+		limit_vpu_enabled = true;
+	else if (event == CLK_ABORT_ENABLE || event == CLK_POST_DISABLE)
+		limit_vpu_enabled = false;
+	else
+		return NOTIFY_OK;	/* event we do not track */
+
+	/* Only the enable transition needs an immediate retarget. */
+	if (!limit_vpu_enabled)
+		return NOTIFY_OK;
+
+	policy = cpufreq_cpu_get(0);
+	if (policy) {
+		dprintk(DEBUG_TEMP, "vpu on\n");
+		/* Re-request the current frequency so the limit kicks in now. */
+		cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
+		cpufreq_cpu_put(policy);
+	}
+	return NOTIFY_OK;
+}
+
+/* Registered on the aclk_vepu clock to catch VPU enable/disable events. */
+static struct notifier_block rk29_cpufreq_aclk_vepu_notifier = {
+ .notifier_call = rk29_cpufreq_aclk_vepu_notifier_event,
+};
+
+/*
+ * Clock notifier for the GPU clock.
+ *
+ * Maintains two pieces of state: limit_gpu_enabled (clock gate open) and
+ * limit_gpu_high (rate above limit_gpu_low_rate).  An aborted rate change
+ * rolls back the flag set speculatively at CLK_PRE_RATE_CHANGE.  When the
+ * combined "GPU running high" condition transitions from false to true,
+ * the current cpufreq target is re-requested so the matching CPU
+ * frequency limit takes effect immediately.  Always returns NOTIFY_OK.
+ */
+static int rk29_cpufreq_clk_gpu_notifier_event(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	struct clk_notifier_data *cnd = ptr;
+	bool was_limiting = limit_gpu_enabled && limit_gpu_high;
+	struct cpufreq_policy *policy;
+
+	if (event == CLK_PRE_RATE_CHANGE) {
+		/* Assume the higher rate before it actually happens. */
+		if (cnd->new_rate > limit_gpu_low_rate)
+			limit_gpu_high = true;
+	} else if (event == CLK_ABORT_RATE_CHANGE) {
+		/* Undo the PRE step only if it was the one that raised the flag. */
+		if (cnd->new_rate > limit_gpu_low_rate &&
+		    cnd->old_rate <= limit_gpu_low_rate)
+			limit_gpu_high = false;
+	} else if (event == CLK_POST_RATE_CHANGE) {
+		if (cnd->new_rate <= limit_gpu_low_rate)
+			limit_gpu_high = false;
+	} else if (event == CLK_PRE_ENABLE) {
+		limit_gpu_enabled = true;
+	} else if (event == CLK_ABORT_ENABLE || event == CLK_POST_DISABLE) {
+		limit_gpu_enabled = false;
+	} else {
+		return NOTIFY_OK;	/* event we do not track */
+	}
+
+	/* Retarget cpufreq only on the false -> true "GPU high" transition. */
+	if (was_limiting || !(limit_gpu_enabled && limit_gpu_high))
+		return NOTIFY_OK;
+
+	policy = cpufreq_cpu_get(0);
+	if (policy) {
+		dprintk(DEBUG_TEMP, "gpu high\n");
+		cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
+		cpufreq_cpu_put(policy);
+	}
+	return NOTIFY_OK;
+}
+
+/* Registered on the GPU clock to catch rate-change and gate events. */
+static struct notifier_block rk29_cpufreq_clk_gpu_notifier = {
+ .notifier_call = rk29_cpufreq_clk_gpu_notifier_event,
+};