/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when a load burst occurs (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
static spinlock_t above_hispeed_delay_lock;
static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static bool io_is_busy;

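/*
 * Fallback idle accounting used when the scheduler cannot supply a
 * microsecond idle counter for a CPU: treat everything that is not
 * busy time (user, system, irq, softirq, steal, nice) as idle, based
 * on the jiffy-resolution cputime statistics.
 */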
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

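/*
 * Prefer the scheduler's microsecond idle counter, falling back to the
 * jiffy-based estimate above when it is unavailable.  Unless io_is_busy
 * is set, time spent waiting on I/O counts as idle rather than as
 * demand for speed.
 */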
static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

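/*
 * Re-arm the evaluation timer on the local CPU and reset the load
 * accounting window.  The slack timer is armed only above the minimum
 * speed, since its sole purpose is to wake an otherwise-idle CPU so
 * speed can be re-evaluated downward.
 */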
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

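/*
 * Look up the delay to enforce before raising speed further once at or
 * above hispeed_freq.  The table alternates delay and frequency
 * threshold; the scan settles on the last delay whose threshold does
 * not exceed the given frequency.
 */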
static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay - 1 &&
			freq >= above_hispeed_delay[i+1]; i += 2)
		;

	ret = above_hispeed_delay[i];
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
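/*
 * The loop below bisects the frequency table: each pass picks the
 * lowest frequency whose projected load (loadadjfreq / freq) stays
 * within that pass's target load, then narrows [freqmin, freqmax]
 * until the chosen frequency repeats.
 */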
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

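/*
 * Sample this CPU's active time since the last update and accumulate it
 * into cputime_speedadj, weighted by the current frequency.  Dividing
 * the sum by elapsed time later yields an average busy frequency, from
 * which the load estimate is derived.
 */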
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

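/*
 * Per-CPU evaluation timer handler: compute the load over the last
 * window, pick a new target speed, apply the above-hispeed delay and
 * the floor_freq/min_sample_time holdoffs, then hand any speed change
 * off to the speedchange task.
 */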
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
		goto rearm;

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

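/*
 * Idle-entry hook, invoked from the idle notifier on the local CPU just
 * before it enters idle.  Paired with cpufreq_interactive_idle_end()
 * below, which re-arms (or immediately runs) the evaluation timer on
 * idle exit.
 */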
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

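/*
 * The speedchange thread applies the target frequencies chosen by the
 * timer and boost paths.  Each flagged CPU's policy is driven to the
 * highest target among the CPUs sharing it, so one busy CPU holds up
 * the common clock.
 */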
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

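/*
 * Raise every online CPU whose target is below hispeed_freq to it, and
 * reset each CPU's floor so the boosted speed is held until
 * min_sample_time (or the boost pulse) expires.
 */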
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

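/*
 * Frequency-transition notifier: when a CPU in a policy completes a
 * speed change, refresh load accounting on every CPU in that policy so
 * time spent at the old frequency is weighted at the correct speed.
 */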
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

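/*
 * Parse a space/colon-separated list of unsigned integers, e.g.
 * "85 1000000:90 1700000:99".  An odd token count is required so the
 * result forms "value [freq:value ...]" pairs.  Returns a kmalloc'd
 * array (or ERR_PTR) that the caller owns.
 */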
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + --ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + --ret, "\n");
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
	above_hispeed_delay = new_above_hispeed_delay;
	nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return count;
}

static struct global_attr above_hispeed_delay_attr =
	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
		show_above_hispeed_delay, store_above_hispeed_delay);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static ssize_t show_io_is_busy(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", io_is_busy);
}

static ssize_t store_io_is_busy(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	io_is_busy = val;
	return count;
}

static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
		show_io_is_busy, store_io_is_busy);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&above_hispeed_delay_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

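/*
 * Governor entry point.  GOV_START arms the per-CPU timers and, on first
 * use, registers the sysfs attributes and notifiers; GOV_STOP unwinds
 * that; GOV_LIMITS clamps the current and target speeds into the new
 * policy range and restarts the timers.
 */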
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			cpufreq_interactive_timer_start(j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			/* hold write semaphore to avoid race */
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				continue;
			}

			/* update target_freq first */
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			/* Reschedule timer.
			 * Delete the timers, else the timer callback may
			 * return without rearming the timer when it fails
			 * to acquire the semaphore.  This race may cause
			 * the timer to stop unexpectedly.
			 */
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(j);
			up_write(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

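/*
 * Module init: set up each CPU's evaluation and slack timers and locks,
 * then spawn the SCHED_FIFO speedchange thread before registering the
 * governor.  The evaluation timer is deferrable so it does not itself
 * wake an idle CPU; the slack timer is not, since waking up to ramp
 * down is its entire purpose.
 */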
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	spin_lock_init(&above_hispeed_delay_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");