/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *          Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#ifdef CONFIG_ARCH_RK29
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(10)
#define MICRO_FREQUENCY_UP_THRESHOLD		(80)
#else
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#endif
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
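/*
 * Note: LATENCY_MULTIPLIER scales a transition latency (in uS) into the
 * default sampling period, MIN_LATENCY_MULTIPLIER gives the hard floor,
 * and TRANSITION_LATENCY_LIMIT is the 10 mS cutoff (in nS) enforced via
 * max_transition_latency below.
 */
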
static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;
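
/*
 * Tunables, settable via sysfs: sampling_rate is in uS; up_threshold and
 * down_differential are percentages of busy wall time; powersave_bias is
 * in units of 0.1% (0-1000) of frequency reduction; ignore_nice and
 * io_is_busy are booleans.
 */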
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}

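/*
 * get_cpu_idle_time_us() reports -1ULL when NO_HZ idle micro-accounting
 * is unavailable; in that case fall back to the coarser jiffy-based
 * bookkeeping above.
 */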
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
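/*
 * Worked example (values illustrative): powersave_bias = 100 means a 10%
 * bias, since the unit is 0.1%. For freq_req = 1000 MHz this gives
 * freq_reduc = 100 MHz and freq_avg = 900 MHz; if the table only holds
 * 800 and 1000 MHz, jiffies_hi and jiffies_lo split the sampling period
 * 50/50 so the time-averaged frequency approximates 900 MHz.
 */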
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_max(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
		    "sysfs file is deprecated - used by: %s\n", current->comm);
	return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

/*** delete after deprecation time ***/

#define DEPRECATION_MSG(file_name)					\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");

#define show_one_old(file_name)						\
static ssize_t show_##file_name##_old					\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return show_##file_name(NULL, NULL, buf);			\
}
show_one_old(sampling_rate);
show_one_old(up_threshold);
show_one_old(ignore_nice_load);
show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);

cpufreq_freq_attr_ro_old(sampling_rate_min);
cpufreq_freq_attr_ro_old(sampling_rate_max);

/*** delete after deprecation time ***/

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.io_is_busy = !!input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

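/*
 * These tunables appear under the global cpufreq kobject, e.g. (path
 * illustrative, assuming the usual sysfs layout of this era):
 *	/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 */
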
/*** delete after deprecation time ***/

#define write_one_old(file_name)					\
static ssize_t store_##file_name##_old					\
(struct cpufreq_policy *unused, const char *buf, size_t count)		\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return store_##file_name(NULL, NULL, buf, count);		\
}
write_one_old(sampling_rate);
write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);

cpufreq_freq_attr_rw_old(sampling_rate);
cpufreq_freq_attr_rw_old(up_threshold);
cpufreq_freq_attr_rw_old(ignore_nice_load);
cpufreq_freq_attr_rw_old(powersave_bias);

static struct attribute *dbs_attributes_old[] = {
	&sampling_rate_max_old.attr,
	&sampling_rate_min_old.attr,
	&sampling_rate_old.attr,
	&up_threshold_old.attr,
	&ignore_nice_load_old.attr,
	&powersave_bias_old.attr,
	NULL
};

static struct attribute_group dbs_attr_group_old = {
	.attrs = dbs_attributes_old,
	.name = "ondemand",
};

/*** delete after deprecation time ***/

/************************** sysfs end ************************/

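/*
 * Raise the frequency of policy @p: with powersave_bias set, the request
 * is first translated to the biased target; otherwise an already-maximal
 * frequency returns early, since there is nothing left to raise.
 */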
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check whether the current idle time is
	 * less than 20% (default); if so, we try to increase the frequency.
	 * Every sampling_rate we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency.
	 */
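
	/*
	 * Example: with up_threshold = 80 and a CPU that was 85% busy over
	 * the last window, load_freq = 85 * freq_avg exceeds
	 * 80 * policy->cur (for freq_avg close to cur), so the frequency
	 * jumps straight to policy->max.
	 */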

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
				j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}

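/*
 * Two-phase sampling: a NORMAL sample runs the full load evaluation in
 * dbs_check_cpu(); when powersave_bias is set and a freq_lo was chosen,
 * the next invocation is a SUB sample that merely drops to freq_lo for
 * the remainder of the period, so the average frequency over the window
 * approximates the biased target.
 */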
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

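/*
 * The sampling work is deferrable: on an idle CPU it does not force a
 * wakeup, so samples may be skipped while the CPU sleeps (see the
 * min_sampling_rate note in cpufreq_gov_dbs_init()).
 */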
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timerschedule work when this governor is used
		 * for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

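			/*
			 * Example (illustrative): a driver reporting
			 * transition_latency = 100000 nS gives latency =
			 * 100 uS, so the default sampling_rate below becomes
			 * max(min_sampling_rate, 100 * 1000) = 100000 uS,
			 * i.e. one sample every 100 mS.
			 */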
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In no_hz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
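		/*
		 * With HZ = 100 (10000 uS per tick) the line above works
		 * out to 2 * 10 * 10000 = 200000 uS between samples.
		 */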
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);