/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "cpufreq_governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };
struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed on a load burst (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can
         * ramp down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /*
         * The sample rate of the timer used to increase frequency
         */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have a single governor instance for the whole system */
struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

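/*
 * Re-arm the sampling timer on the local CPU and reset the idle/load
 * accounting window.  All callers hold pcpu->enable_sem for read.
 */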
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

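/*
 * Look up the delay that applies above hispeed_freq at the given frequency.
 * above_hispeed_delay stores delay/frequency pairs flattened as
 * {delay, freq, delay, freq, ..., delay}; skip past each frequency
 * threshold that freq has reached and return the matching delay.
 */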
static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

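/*
 * As above, but for the target_loads table: return the target load that
 * applies at the given frequency.
 */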
static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                    freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

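/*
 * Sample the CPU's idle and wall time, credit the busy time at the current
 * frequency into cputime_speedadj, and return the new timestamp.  Called
 * with pcpu->load_lock held.
 */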
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        u64 delta_idle;
        u64 delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (now_idle - pcpu->time_in_idle);
        delta_time = (now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

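/*
 * Per-CPU sampling timer: compute the load over the last window, choose a
 * new target frequency, and queue the CPU for the speedchange task if the
 * target changed.
 */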
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

#ifdef CONFIG_ARCH_ROCKCHIP
        pcpu->target_freq = pcpu->policy->cur;
#endif

        if (cpu_load >= tunables->go_hispeed_load || boosted) {
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= tunables->hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index))
                goto rearm;

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time <
                                tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

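/*
 * Idle-entry hook: if this CPU is above the minimum speed, keep the
 * sampling timer armed so an idle CPU cannot hold its policy siblings
 * at an elevated frequency indefinitely.
 */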
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

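/*
 * Realtime worker thread: for each CPU flagged in speedchange_cpumask,
 * apply the highest target_freq among its policy siblings via
 * __cpufreq_driver_target(), then sleep until woken again.
 */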
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

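/*
 * Raise every online CPU to at least hispeed_freq, make hispeed_freq the
 * floor, and wake the speedchange task if any target actually changed.
 */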
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;

                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

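/*
 * CPUFREQ_POSTCHANGE transition notifier: fold the time spent at the old
 * frequency into the load statistics of every CPU in the policy.
 */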
static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

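/*
 * Parse a space/colon separated list of unsigned ints (e.g.
 * "85 1000000:90 1700000:99") into a newly allocated array.  The token
 * count must be odd (values separated by frequency thresholds); returns
 * an ERR_PTR on malformed input or allocation failure.
 */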
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->timer_rate = val;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                          char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)                                     \
static ssize_t show_##file_name##_gov_sys                               \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return show_##file_name(common_tunables, buf);                  \
}                                                                       \
                                                                        \
static ssize_t show_##file_name##_gov_pol                               \
(struct cpufreq_policy *policy, char *buf)                              \
{                                                                       \
        return show_##file_name(policy->governor_data, buf);            \
}

#define store_gov_pol_sys(file_name)                                    \
static ssize_t store_##file_name##_gov_sys                              \
(struct kobject *kobj, struct attribute *attr, const char *buf,         \
        size_t count)                                                   \
{                                                                       \
        return store_##file_name(common_tunables, buf, count);          \
}                                                                       \
                                                                        \
static ssize_t store_##file_name##_gov_pol                              \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        return store_##file_name(policy->governor_data, buf, count);    \
}

#define show_store_gov_pol_sys(file_name)                               \
show_gov_pol_sys(file_name);                                            \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)                                          \
static struct global_attr _name##_gov_sys =                             \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)                                          \
static struct freq_attr _name##_gov_pol =                               \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)                                      \
        gov_sys_attr_rw(_name);                                         \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

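/*
 * Governor entry point: dispatches the POLICY_INIT/POLICY_EXIT, START,
 * STOP and LIMITS events from the cpufreq core, managing the tunables,
 * sysfs groups and per-CPU timers.
 */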
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        return rc;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

#ifdef CONFIG_ARCH_ROCKCHIP
                {
                        unsigned int index;

                        freq_table = cpufreq_frequency_get_table(policy->cpu);
                        tunables->hispeed_freq = policy->max;
                        if (policy->min < 816000)
                                tunables->hispeed_freq = 816000;
                        else if (cpufreq_frequency_table_target(policy,
                                        freq_table, policy->min + 1,
                                        CPUFREQ_RELATION_L, &index) == 0)
                                tunables->hispeed_freq =
                                        freq_table[index].frequency;
                        if (policy->max > 1416000) {
                                tunables->timer_slack_val =
                                        20 * USEC_PER_MSEC;
                                tunables->min_sample_time =
                                        40 * USEC_PER_MSEC;
                                tunables->above_hispeed_delay[0] =
                                        80 * USEC_PER_MSEC;
                                store_target_loads(tunables,
                                        "70 1200000:80 1416000:99", 0);
                        }
                        tunables->boostpulse_duration_val =
                                500 * USEC_PER_MSEC;
                }
#endif

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                policy->governor_data = tunables;
                if (!have_governor_per_policy())
                        common_tunables = tunables;

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());
                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        /* hold write semaphore to avoid race */
                        down_write(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_write(&pcpu->enable_sem);
                                continue;
                        }

                        /* update target_freq first */
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        /*
                         * Reschedule the timer.  Delete the timers first;
                         * otherwise the timer callback may return without
                         * re-arming the timer if it fails to acquire the
                         * semaphore, leaving the timer stopped unexpectedly.
                         */
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        up_write(&pcpu->enable_sem);
                }
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");