cpufreq: interactive: Separate speed target revalidate time and initial set time
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

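/*
 * Per-CPU governor state.  Note the two timestamp pairs: target_set_time
 * marks when target_freq was last actually changed, while
 * target_validate_time marks when the current speed was last revalidated
 * (re-chosen without a change).  Keeping the two separate lets the
 * min_sample_time ramp-down check measure from the last validation
 * rather than from the last frequency change.
 */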
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        u64 target_validate_time;
        u64 target_validate_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime kthread handles raising frequency; workqueue handles lowering it */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Hi speed to bump to from lo speed on a load burst (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

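/*
 * All of the tunables above are exported read/write through the sysfs
 * group registered below on the global cpufreq kobject, so (assuming
 * sysfs is mounted at /sys) they can be adjusted at runtime, e.g.:
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 *
 * The store handlers parse with base 0, so hex (0x...) input also works.
 */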
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

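/*
 * Per-CPU sampling timer.  Each run computes two load estimates: a
 * short-term load over the interval since the last idle sample started,
 * and a longer-term load since the target frequency was last changed.
 * The greater of the two picks the new target, and the actual transition
 * is handed off to the up task or the down workqueue.
 */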
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

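        /*
         * Worked example: at the default 20 ms timer_rate, a sample with
         * delta_time = 20000 us of which delta_idle = 5000 us were idle
         * gives cpu_load = 100 * (20000 - 5000) / 20000 = 75, just below
         * the default go_hispeed_load of 85.
         */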
        if (cpu_load >= go_hispeed_load) {
                if (pcpu->target_freq <= pcpu->policy->min) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            pcpu->timer_run_time - pcpu->target_set_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = pcpu->policy->max * cpu_load / 100;
        }

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale down unless we have been at this frequency for the
         * minimum sample time since last validated.
         */
        if (new_freq < pcpu->target_freq) {
                if (pcpu->timer_run_time - pcpu->target_validate_time
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                         pcpu->target_freq, new_freq);
                        goto rearm;
                }
        }

        pcpu->target_validate_time_in_idle = now_idle;
        pcpu->target_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
                cpumask_set_cpu(data, &down_cpumask);
                spin_unlock_irqrestore(&down_cpumask_lock, flags);
                queue_work(down_wq, &freq_scale_down_work);
        } else {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&up_cpumask_lock, flags);
                cpumask_set_cpu(data, &up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);
                wake_up_process(up_task);
        }

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }
}

static int cpufreq_interactive_up_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&up_cpumask_lock, flags);

                if (cpumask_empty(&up_cpumask)) {
                        spin_unlock_irqrestore(&up_cpumask_lock, flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&up_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = up_cpumask;
                cpumask_clear(&up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        mutex_lock(&set_speed_lock);

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        mutex_unlock(&set_speed_lock);
                        trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

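/*
 * The up task above and the down work below resolve a shared clock domain
 * the same way: scan every CPU in the policy, take the highest target_freq
 * requested, and program that, so one CPU lowering its own request never
 * drags down a busier sibling.
 */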
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&down_cpumask_lock, flags);
        tmp_mask = down_cpumask;
        cpumask_clear(&down_cpumask);
        spin_unlock_irqrestore(&down_cpumask_lock, flags);

        for_each_cpu(cpu, &tmp_mask) {
                unsigned int j;
                unsigned int max_freq = 0;

                pcpu = &per_cpu(cpuinfo, cpu);
                smp_rmb();

                if (!pcpu->governor_enabled)
                        continue;

                mutex_lock(&set_speed_lock);

                for_each_cpu(j, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, j);

                        if (pjcpu->target_freq > max_freq)
                                max_freq = pjcpu->target_freq;
                }

                if (max_freq != pcpu->policy->cur)
                        __cpufreq_driver_target(pcpu->policy, max_freq,
                                                CPUFREQ_RELATION_H);

                mutex_unlock(&set_speed_lock);
                trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
                                               pcpu->policy->cur);
        }
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

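/*
 * Governor event callback.  The sysfs attribute group is global rather
 * than per-policy, so active_count reference-counts GOV_START/GOV_STOP:
 * the group is created only on the first start and removed only after
 * the last stop.
 */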
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->target_validate_time =
                                pcpu->target_set_time;
                        pcpu->target_validate_time_in_idle =
                                pcpu->target_set_time_in_idle;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not create the sysfs entries if we have already
                 * done so for another policy.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        up_task = kthread_create(cpufreq_interactive_up_task, NULL,
                                 "kinteractiveup");
        if (IS_ERR(up_task))
                return PTR_ERR(up_task);

        sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
        get_task_struct(up_task);

        /*
         * No rescuer thread, bind to CPU queuing the work for possibly
         * warm cache (probably doesn't matter much).
         */
        down_wq = alloc_workqueue("kinteractive_down", 0, 1);

        if (!down_wq)
                goto err_freeuptask;

        INIT_WORK(&freq_scale_down_work,
                  cpufreq_interactive_freq_down);

        spin_lock_init(&up_cpumask_lock);
        spin_lock_init(&down_cpumask_lock);
        mutex_init(&set_speed_lock);

        idle_notifier_register(&cpufreq_interactive_idle_nb);

        return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
        put_task_struct(up_task);
        return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(up_task);
        put_task_struct(up_task);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");