/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 freq_change_time;
	u64 freq_change_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/*
 * Speed-change requests: an RT kthread services the up_cpumask, and a
 * workqueue services the down_cpumask.
 */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;
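
/*
 * Locking overview (inferred from the use below): the two spinlocks guard
 * up_cpumask/down_cpumask, which are set from the per-CPU timer callback
 * (softirq context) and consumed by up_task and the down workqueue;
 * set_speed_lock serializes __cpufreq_driver_target() calls so that CPUs
 * sharing a policy don't race when picking the maximum target frequency.
 */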

/* Hi speed to bump to from lo speed when load burst (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 95
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (20 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;
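
/*
 * With the defaults above, load is sampled every 20 ms; a sample at or
 * above 95% load bumps the CPU to hispeed_freq, and the governor waits
 * at least 20 ms at the current speed before ramping back down.
 */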

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int)(now_idle - time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int)(now_idle - pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time - pcpu->freq_change_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;
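
	/*
	 * Worked example: if the 20 ms (20000 us) window had 5000 us of
	 * idle time, cpu_load = 100 * (20000 - 5000) / 20000 = 75.
	 */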

	if (cpu_load >= go_hispeed_load) {
		if (pcpu->policy->cur == pcpu->policy->min) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = pcpu->policy->max * cpu_load / 100;
	}

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;
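
	/*
	 * CPUFREQ_RELATION_H selects the highest table frequency at or
	 * below the raw target; e.g. with a hypothetical table of
	 * {350, 700, 1000} MHz, a raw target of 756 MHz snaps to 700 MHz.
	 */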

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
						  pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (pcpu->timer_run_time - pcpu->freq_change_time
		    < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
							 pcpu->target_freq,
							 new_freq);
			goto rearm;
		}
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}
}
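
/*
 * Raising speed is latency-critical, so it is handled by a dedicated
 * kthread that init gives SCHED_FIFO priority (see
 * cpufreq_interactive_init below) rather than by a workqueue.
 */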
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			mutex_lock(&set_speed_lock);

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			mutex_unlock(&set_speed_lock);

			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,
						     &pcpu->freq_change_time);
			trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}
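
/*
 * Lowering speed can tolerate scheduling delay, so it runs from a
 * workqueue in normal process context; the logic mirrors the up path.
 */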
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		unsigned int j;
		unsigned int max_freq = 0;

		pcpu = &per_cpu(cpuinfo, cpu);
		smp_rmb();

		if (!pcpu->governor_enabled)
			continue;

		mutex_lock(&set_speed_lock);

		for_each_cpu(j, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, j);

			if (pjcpu->target_freq > max_freq)
				max_freq = pjcpu->target_freq;
		}

		if (max_freq != pcpu->policy->cur)
			__cpufreq_driver_target(pcpu->policy, max_freq,
						CPUFREQ_RELATION_H);

		mutex_unlock(&set_speed_lock);
		pcpu->freq_change_time_in_idle =
			get_cpu_idle_time_us(cpu,
					     &pcpu->freq_change_time);
		trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
					       pcpu->policy->cur);
	}
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
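
/*
 * The group above hangs off the global cpufreq kobject, so on a typical
 * system the tunables are expected under
 * /sys/devices/system/cpu/cpufreq/interactive/, e.g.:
 *
 *	echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *	echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 */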

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(j,
						     &pcpu->freq_change_time);
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		if (!hispeed_freq)
			hispeed_freq = policy->max;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}

	return 0;
}
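
/*
 * Note on the GOV_LIMITS case above: if, for example, a thermal driver
 * drops policy->max below the current speed, the governor retargets to
 * the new policy->max immediately instead of waiting for the next load
 * sample.
 */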

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
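
/*
 * Note: idle_notifier_register() and the IDLE_START/IDLE_END events are
 * Android kernel extensions; this governor assumes a tree that provides
 * that idle notifier API.
 */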

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("kinteractive_down", 0, 1);

	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);
	mutex_init(&set_speed_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);

	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
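
/*
 * When built in as the default governor, this must be registered before
 * cpufreq drivers probe, hence fs_initcall(); as a plain module a normal
 * module_init() is sufficient.
 */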

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");