/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "cpufreq_governor.h"
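
/*
 * Select the sysfs attribute group to expose for this governor: the
 * per-policy set when the cpufreq driver requests a governor instance
 * per policy, the global (system-wide) set otherwise.
 */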
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}
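
/*
 * Evaluate the load since the last sample on each CPU of the policy and
 * report the maximum to the governor's gov_check_cpu() hook. Per-CPU load
 * is 100 * (wall_time - idle_time) / wall_time over the elapsed interval;
 * ondemand additionally scales the result by the average frequency so it
 * approximates absolute load rather than load at the current frequency.
 */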
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND)
		ignore_nice = od_tuners->ignore_nice;
	else
		ignore_nice = cs_tuners->ignore_nice;

	policy = cdbs->cur_policy;

	/* Get Absolute Load (in terms of freq for ondemand gov) */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			/* Nice time is accounted against this CPU's baseline */
			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					j_cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		if (dbs_data->cdata->governor == GOV_ONDEMAND) {
			int freq_avg = __cpufreq_driver_getavg(policy, j);
			if (freq_avg <= 0)
				freq_avg = policy->cur;

			load *= freq_avg;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
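
/*
 * (Re)arm the sampling work for one CPU: mod_delayed_work_on() queues the
 * work item on @cpu, or adjusts its timeout to @delay if already pending.
 */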
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}
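
/*
 * Queue sampling work for a policy, either on every CPU the policy spans
 * (all_cpus) or only on the local CPU.
 */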
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	if (!all_cpus) {
		__gov_queue_work(smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
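
/*
 * Cancel the queued sampling work on each CPU of the policy, waiting for
 * any work item that is currently executing to finish.
 */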
static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_common_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->work);
	}
}

/* Return true if another load evaluation is due, false if sampled recently */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);
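
/*
 * Write the new sampling rate into the tuners of whichever governor type
 * owns this dbs_data (conservative or ondemand).
 */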
static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}
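
/*
 * Common governor event handler shared by ondemand and conservative:
 * POLICY_INIT/EXIT allocate and free the per-governor dbs_data (including
 * its sysfs group), while START/STOP/LIMITS drive the per-CPU sampling
 * machinery and clamp the frequency to updated policy limits.
 */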
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
	struct od_ops *od_ops = NULL;
	struct od_dbs_tuners *od_tuners = NULL;
	struct cs_dbs_tuners *cs_tuners = NULL;
	struct cpu_dbs_common_info *cpu_cdbs;
	unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;
	int rc;

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(dbs_data);
		} else if (dbs_data) {
			dbs_data->usage_count++;
			policy->governor_data = dbs_data;
			return 0;
		}

		dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
		if (!dbs_data) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		dbs_data->cdata = cdata;
		dbs_data->usage_count = 1;
		rc = cdata->init(dbs_data);
		if (rc) {
			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
			kfree(dbs_data);
			return rc;
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr(dbs_data));
		if (rc) {
			cdata->exit(dbs_data);
			kfree(dbs_data);
			return rc;
		}

		policy->governor_data = dbs_data;

		/* policy latency is in nS. Convert it to uS first */
		latency = policy->cpuinfo.transition_latency / 1000;
		if (latency == 0)
			latency = 1;

		/* Bring kernel and HW constraints together */
		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
				MIN_LATENCY_MULTIPLIER * latency);
		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

		if ((cdata->governor == GOV_CONSERVATIVE) &&
				(!policy->governor->initialized)) {
			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

			cpufreq_register_notifier(cs_ops->notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		if (!have_governor_per_policy())
			cdata->gdbs_data = dbs_data;

		return 0;
	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--dbs_data->usage_count) {
			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr(dbs_data));

			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
				(policy->governor->initialized == 1)) {
				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

				cpufreq_unregister_notifier(cs_ops->notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
			}

			cdata->exit(dbs_data);
			kfree(dbs_data);
			cdata->gdbs_data = NULL;
		}

		policy->governor_data = NULL;
		return 0;
	}

	cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		cs_tuners = dbs_data->tuners;
		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice;
	} else {
		od_tuners = dbs_data->tuners;
		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice;
		od_ops = dbs_data->cdata->gov_ops;
		io_busy = od_tuners->io_is_busy;
	}
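
	/*
	 * START sets up per-CPU state and kicks off sampling, STOP tears it
	 * down, and LIMITS re-clamps the current frequency to the new policy
	 * bounds before re-evaluating the load.
	 */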
	switch (event) {
	case CPUFREQ_GOV_START:
		if (!policy->cur)
			return -EINVAL;

		mutex_lock(&dbs_data->mutex);

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_common_info *j_cdbs =
				dbs_data->cdata->get_cpu_cdbs(j);

			j_cdbs->cpu = j;
			j_cdbs->cur_policy = policy;
			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
					&j_cdbs->prev_cpu_wall, io_busy);
			if (ignore_nice)
				j_cdbs->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			mutex_init(&j_cdbs->timer_mutex);
			INIT_DEFERRABLE_WORK(&j_cdbs->work,
					dbs_data->cdata->gov_dbs_timer);
		}

		/*
		 * conservative does not implement micro-accounting like the
		 * ondemand governor, thus we are bound to jiffies/HZ
		 */
		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
			cs_dbs_info->down_skip = 0;
			cs_dbs_info->enable = 1;
			cs_dbs_info->requested_freq = policy->cur;
		} else {
			od_dbs_info->rate_mult = 1;
			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
			od_ops->powersave_bias_init_cpu(cpu);
		}

		mutex_unlock(&dbs_data->mutex);

		/* Initiate timer time stamp */
		cpu_cdbs->time_stamp = ktime_get();

		gov_queue_work(dbs_data, policy,
				delay_for_sampling_rate(sampling_rate), true);
		break;

	case CPUFREQ_GOV_STOP:
		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
			cs_dbs_info->enable = 0;

		gov_cancel_work(dbs_data, policy);

		mutex_lock(&dbs_data->mutex);
		mutex_destroy(&cpu_cdbs->timer_mutex);
		/* Drop the stale policy pointer so it cannot be reused */
		cpu_cdbs->cur_policy = NULL;

		mutex_unlock(&dbs_data->mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&cpu_cdbs->timer_mutex);
		if (policy->max < cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(dbs_data, cpu);
		mutex_unlock(&cpu_cdbs->timer_mutex);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);