/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shortform for demandbased switching
 * It helps to keep variable names smaller, simpler
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#ifdef CONFIG_ARCH_RK29
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(10)
#define MICRO_FREQUENCY_UP_THRESHOLD		(80)
#else
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#endif
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
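
/*
 * Note on the two tunables above: up_threshold is the busy percentage
 * above which the governor jumps straight to the maximum frequency, and
 * down_differential is the hysteresis subtracted from up_threshold when
 * picking a lower frequency.  With the defaults (80/10) the governor
 * ramps up once load exceeds 80% and only scales down to a frequency
 * that would still keep load below roughly 70%.
 */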

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using appropriate sampling
 * rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
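
/*
 * Worked example (illustrative numbers): a driver that reports a
 * transition latency of 10,000 nS yields latency = 10 uS after the
 * nS->uS conversion in cpufreq_governor_dbs().  The default sampling
 * rate then becomes latency * LATENCY_MULTIPLIER = 10,000 uS (10 mS),
 * and the lowest rate userspace may set is bounded below by
 * max(min_sampling_rate, MIN_LATENCY_MULTIPLIER * latency), here at
 * least 1,000 uS.
 */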

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct	*kondemand_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
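
/*
 * Jiffy-resolution fallback for idle-time accounting: idle is computed as
 * wall time minus the busy time (user, system, irq, softirq, steal and
 * nice) accumulated in kstat for this CPU, so iowait is implicitly
 * treated as idle.  Both the returned idle time and the wall time stored
 * through *wall are converted to microseconds.
 */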
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
							cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}
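
/*
 * Prefer the microsecond-resolution idle statistics kept by the tickless
 * (NO_HZ) code; get_cpu_idle_time_us() returns -1ULL when that accounting
 * is not available, in which case we fall back to the jiffy-based helper
 * above.
 */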
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
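	/*
	 * Example (illustrative numbers): with a 600/800 MHz table,
	 * powersave_bias = 100 (10%) and freq_next at 800 MHz, freq_avg is
	 * 720 MHz.  For jiffies_total = 10 this gives
	 * jiffies_hi = (720 - 600) * 10 / (800 - 600) = 6 and jiffies_lo = 4,
	 * so the CPU spends 6 jiffies at 800 MHz and 4 jiffies at 600 MHz,
	 * averaging roughly 720 MHz over the sampling window.
	 */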
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_max(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
	       "sysfs file is deprecated - used by: %s\n", current->comm);
	return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

#define define_one_ro(_name)		\
static struct global_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

/*** delete after deprecation time ***/

#define DEPRECATION_MSG(file_name)					\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");

#define show_one_old(file_name)						\
static ssize_t show_##file_name##_old					\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return show_##file_name(NULL, NULL, buf);			\
}
show_one_old(sampling_rate);
show_one_old(up_threshold);
show_one_old(ignore_nice_load);
show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);

#define define_one_ro_old(object, _name)	\
static struct freq_attr object =		\
__ATTR(_name, 0444, show_##_name##_old, NULL)

define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
define_one_ro_old(sampling_rate_max_old, sampling_rate_max);

/*** delete after deprecation time ***/

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}
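
/*
 * Note: powersave_bias is expressed in units of 0.1% (0..1000).  For
 * example, writing 100 through the global sysfs interface (typically
 * /sys/devices/system/cpu/cpufreq/ondemand/powersave_bias) biases each
 * chosen target roughly 10% below what plain ondemand would request,
 * with powersave_bias_target() alternating between the two neighbouring
 * table frequencies to approximate that average.
 */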

#define define_one_rw(_name) \
static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/*** delete after deprecation time ***/

#define write_one_old(file_name)					\
static ssize_t store_##file_name##_old					\
(struct cpufreq_policy *unused, const char *buf, size_t count)		\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return store_##file_name(NULL, NULL, buf, count);		\
}
write_one_old(sampling_rate);
write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);

#define define_one_rw_old(object, _name)	\
static struct freq_attr object =		\
__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)

define_one_rw_old(sampling_rate_old, sampling_rate);
define_one_rw_old(up_threshold_old, up_threshold);
define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
define_one_rw_old(powersave_bias_old, powersave_bias);

static struct attribute *dbs_attributes_old[] = {
	&sampling_rate_max_old.attr,
	&sampling_rate_min_old.attr,
	&sampling_rate_old.attr,
	&up_threshold_old.attr,
	&ignore_nice_load_old.attr,
	&powersave_bias_old.attr,
	NULL
};

static struct attribute_group dbs_attr_group_old = {
	.attrs = dbs_attributes_old,
	.name = "ondemand",
};

/*** delete after deprecation time ***/

/************************** sysfs end ************************/
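
/*
 * Core sampling routine: for every CPU sharing the policy we compute the
 * busy percentage over the last sampling interval and scale it by the
 * (average) frequency the CPU actually ran at, keeping the largest value
 * as max_load_freq.  Comparing that against up_threshold * policy->cur is
 * equivalent to asking whether any CPU was more than up_threshold percent
 * busy at the current frequency.
 */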
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					 j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);
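		/*
		 * Example (illustrative numbers): 50% load at 1 GHz gives
		 * max_load_freq = 50 * 1,000,000.  With the default
		 * thresholds (80 - 10) this requests
		 * freq_next = 50,000,000 / 70 ~= 714 MHz, i.e. the lowest
		 * frequency at which the same work would still leave load
		 * below 70%.
		 */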

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
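
/*
 * Periodic worker: on a DBS_NORMAL_SAMPLE it re-evaluates the load via
 * dbs_check_cpu().  When powersave_bias selected a two-frequency split,
 * the next wakeup is flagged DBS_SUB_SAMPLE and simply drops to freq_lo
 * for the remainder of the sampling window before the cycle repeats.
 */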
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}
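
/*
 * Governor entry point.  CPUFREQ_GOV_START initialises the per-CPU state,
 * creates the sysfs groups, derives the sampling rate from the driver's
 * transition latency (first user only) and kicks off the sampling timer.
 * CPUFREQ_GOV_STOP tears that down again, and CPUFREQ_GOV_LIMITS clamps
 * the current frequency back into the policy's new min/max range.
 */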
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In no_hz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
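		/*
		 * Illustrative numbers: with HZ=1000 this works out to
		 * 2 * 10,000 uS = 20 mS; with HZ=100 it becomes 200 mS.
		 */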
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);