cpufreq: interactive: Boost frequency on touchscreen input
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/input.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#include <asm/cputime.h>

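/*
 * Number of policies currently running this governor.  The sysfs group
 * and the input handler are registered when this goes 0 -> 1 and torn
 * down when it drops back to 0; see cpufreq_governor_interactive().
 */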
static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        u64 target_validate_time;
        u64 target_validate_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/*
 * A realtime kthread handles scaling up; a workqueue handles scaling
 * down.  The pending-CPU masks are guarded by their spinlocks, and
 * set_speed_lock serializes the actual frequency transitions.
 */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Hi speed to bump to from lo speed when a load burst is detected (default: policy max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Boost to hispeed on touchscreen input.
 */
static int input_boost_val;

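/*
 * Opening the input device is deferred to a work item so that it runs
 * outside the input core's connect path; see
 * cpufreq_interactive_input_connect().
 */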
struct cpufreq_interactive_inputopen {
        struct input_handle *handle;
        struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

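/*
 * The 10 ms max_transition_latency below tells the cpufreq core not to
 * pair this governor with drivers whose transition latency exceeds it.
 */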
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

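/*
 * Per-CPU sampling timer.  Computes load both over the most recent
 * idle-exit window and since the last speed change, picks a target
 * frequency, and hands any change to the up kthread or down workqueue.
 */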
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;
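        /*
         * Worked example with hypothetical numbers at the default 20 ms
         * timer_rate: delta_time = 20000 us and delta_idle = 5000 us
         * give cpu_load = 100 * (20000 - 5000) / 20000 = 75.
         */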

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_hispeed_load) {
                if (pcpu->target_freq <= pcpu->policy->min) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            pcpu->timer_run_time - pcpu->target_set_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = pcpu->policy->max * cpu_load / 100;
        }

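        /*
         * Example with hypothetical values: policy->max = 1200000 kHz and
         * cpu_load = 60 give new_freq = 720000 kHz, which the lookup below
         * (CPUFREQ_RELATION_H) rounds down to the nearest supported table
         * frequency at or below that target.
         */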
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale down unless we have been at this frequency for the
         * minimum sample time since last validated.
         */
        if (new_freq < pcpu->target_freq) {
                if (pcpu->timer_run_time - pcpu->target_validate_time
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                         pcpu->target_freq, new_freq);
                        goto rearm;
                }
        }

        pcpu->target_validate_time_in_idle = now_idle;
        pcpu->target_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
                cpumask_set_cpu(data, &down_cpumask);
                spin_unlock_irqrestore(&down_cpumask_lock, flags);
                queue_work(down_wq, &freq_scale_down_work);
        } else {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&up_cpumask_lock, flags);
                cpumask_set_cpu(data, &up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);
                wake_up_process(up_task);
        }

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }
}

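/*
 * Realtime kthread that raises speed for each CPU flagged in up_cpumask,
 * targeting the highest speed requested within each policy.  It runs
 * SCHED_FIFO (see cpufreq_interactive_init()) so that ramp-up is not
 * delayed behind the busy workload that triggered it.
 */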
static int cpufreq_interactive_up_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&up_cpumask_lock, flags);

                if (cpumask_empty(&up_cpumask)) {
                        spin_unlock_irqrestore(&up_cpumask_lock, flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&up_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = up_cpumask;
                cpumask_clear(&up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        mutex_lock(&set_speed_lock);

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        mutex_unlock(&set_speed_lock);
                        trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

static void cpufreq_interactive_freq_down(struct work_struct *work)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&down_cpumask_lock, flags);
        tmp_mask = down_cpumask;
        cpumask_clear(&down_cpumask);
        spin_unlock_irqrestore(&down_cpumask_lock, flags);

        for_each_cpu(cpu, &tmp_mask) {
                unsigned int j;
                unsigned int max_freq = 0;

                pcpu = &per_cpu(cpuinfo, cpu);
                smp_rmb();

                if (!pcpu->governor_enabled)
                        continue;

                mutex_lock(&set_speed_lock);

                for_each_cpu(j, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, j);

                        if (pjcpu->target_freq > max_freq)
                                max_freq = pjcpu->target_freq;
                }

                if (max_freq != pcpu->policy->cur)
                        __cpufreq_driver_target(pcpu->policy, max_freq,
                                                CPUFREQ_RELATION_H);

                mutex_unlock(&set_speed_lock);
                trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
                                               pcpu->policy->cur);
        }
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        trace_cpufreq_interactive_boost(hispeed_freq);
        spin_lock_irqsave(&up_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &up_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        anyboost = 1;
                }

                /*
                 * Refresh the time at which the current (possibly boosted)
                 * speed was last validated, restarting the clock that gates
                 * ramping back down.
                 */
                pcpu->target_validate_time_in_idle =
                        get_cpu_idle_time_us(i, &pcpu->target_validate_time);
        }

        spin_unlock_irqrestore(&up_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(up_task);
}

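/*
 * Input events arrive in packets terminated by EV_SYN/SYN_REPORT, so
 * boosting on that code fires once per packet rather than once per
 * individual axis or key event.
 */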
static void cpufreq_interactive_input_event(struct input_handle *handle,
                                            unsigned int type,
                                            unsigned int code, int value)
{
        if (input_boost_val && type == EV_SYN && code == SYN_REPORT)
                cpufreq_interactive_boost();
}

static void cpufreq_interactive_input_open(struct work_struct *w)
{
        struct cpufreq_interactive_inputopen *io =
                container_of(w, struct cpufreq_interactive_inputopen,
                             inputopen_work);
        int error;

        error = input_open_device(io->handle);
        if (error)
                input_unregister_handle(io->handle);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
                                             struct input_dev *dev,
                                             const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        pr_info("%s: connect to %s\n", __func__, dev->name);
        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "cpufreq_interactive";

        error = input_register_handle(handle);
        if (error)
                goto err;

        inputopen.handle = handle;
        queue_work(down_wq, &inputopen.inputopen_work);
        return 0;
err:
        kfree(handle);
        return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

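/*
 * Device match table: BIT_WORD()/BIT_MASK() place each capability bit in
 * the right word of the bitmaps, so the first entry matches multi-touch
 * touchscreens (ABS_MT_POSITION_X/Y) and the second single-touch devices
 * reporting BTN_TOUCH plus ABS_X/ABS_Y.
 */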
static const struct input_device_id cpufreq_interactive_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                         INPUT_DEVICE_ID_MATCH_ABSBIT,
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
                            BIT_MASK(ABS_MT_POSITION_X) |
                            BIT_MASK(ABS_MT_POSITION_Y) },
        }, /* multi-touch touchscreen */
        {
                .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
                         INPUT_DEVICE_ID_MATCH_ABSBIT,
                .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
                .absbit = { [BIT_WORD(ABS_X)] =
                            BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
        }, /* touchpad */
        { },
};

static struct input_handler cpufreq_interactive_input_handler = {
        .event          = cpufreq_interactive_input_event,
        .connect        = cpufreq_interactive_input_connect,
        .disconnect     = cpufreq_interactive_input_disconnect,
        .name           = "cpufreq_interactive",
        .id_table       = cpufreq_interactive_ids,
};

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", input_boost_val);
}

static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
                                 const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        input_boost_val = val;
        return count;
}

define_one_global_rw(input_boost);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &input_boost.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};
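/*
 * These attributes are created under the cpufreq global kobject,
 * typically /sys/devices/system/cpu/cpufreq/interactive/, e.g.:
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 */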
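/*
 * Governor entry point.  CPUFREQ_GOV_START/STOP set up and tear down the
 * per-CPU state (and, for the first/last user, the sysfs group and input
 * handler); CPUFREQ_GOV_LIMITS clamps the current speed into the updated
 * policy range.
 */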
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table = cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->target_validate_time =
                                pcpu->target_set_time;
                        pcpu->target_validate_time_in_idle =
                                pcpu->target_set_time_in_idle;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                rc = input_register_handler(&cpufreq_interactive_input_handler);
                if (rc)
                        pr_warn("%s: failed to register input handler\n",
                                __func__);

                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                input_unregister_handler(&cpufreq_interactive_input_handler);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        up_task = kthread_create(cpufreq_interactive_up_task, NULL,
                                 "kinteractiveup");
        if (IS_ERR(up_task))
                return PTR_ERR(up_task);

        sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
        get_task_struct(up_task);

        /*
         * No rescuer thread, bind to CPU queuing the work for possibly
         * warm cache (probably doesn't matter much).
         */
        down_wq = alloc_workqueue("kinteractive_down", 0, 1);

        if (!down_wq)
                goto err_freeuptask;

        INIT_WORK(&freq_scale_down_work,
                  cpufreq_interactive_freq_down);

        spin_lock_init(&up_cpumask_lock);
        spin_lock_init(&down_cpumask_lock);
        mutex_init(&set_speed_lock);

        idle_notifier_register(&cpufreq_interactive_idle_nb);
        INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
        return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
        /* Stop the created-but-never-woken kthread, then drop our reference. */
        kthread_stop(up_task);
        put_task_struct(up_task);
        return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(up_task);
        put_task_struct(up_task);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");