/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
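
/*
 * Illustrative sketch (not part of the original file): a governor would
 * typically hang its tunables off the kobject returned above, so that they
 * land per-policy or globally depending on the driver's flags. The governor
 * name and attribute group below are hypothetical.
 *
 *	static int foo_gov_start(struct cpufreq_policy *policy)
 *	{
 *		return sysfs_create_group(get_governor_parent_kobj(policy),
 *					  &foo_gov_attr_group);
 *	}
 */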

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
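
/*
 * Illustrative sketch (not part of the original file): governors sample
 * get_cpu_idle_time() at the start and end of a window and derive the
 * load from the deltas, roughly like this:
 *
 *	u64 prev_wall, prev_idle, cur_wall, cur_idle, busy;
 *
 *	prev_idle = get_cpu_idle_time(cpu, &prev_wall, 0);
 *	... sampling interval elapses ...
 *	cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);
 *
 *	busy = (cur_wall - prev_wall) - (cur_idle - prev_idle);
 *	load = div64_u64(100 * busy, cur_wall - prev_wall);
 */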

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
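
/*
 * Illustrative example (hypothetical driver, not part of the original
 * file): a platform driver whose CPUs all share one clock can implement
 * its ->init() callback entirely on top of cpufreq_generic_init(). The
 * table, the function name and the 100000 ns transition latency below
 * are made up for the example.
 *
 *	static struct cpufreq_frequency_table foo_freq_table[] = {
 *		{ .frequency = 500000 },
 *		{ .frequency = 1000000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 */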

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
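
/*
 * Illustrative example (hypothetical, not part of the original file): a
 * driver that stores its clock in policy->clk during ->init() can plug
 * cpufreq_generic_get() straight into its driver structure:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name	= "foo-cpufreq",
 *		.init	= foo_cpufreq_init,
 *		.get	= cpufreq_generic_get,
 *	};
 */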

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        return per_cpu(cpufreq_cpu_data, cpu);
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
                return NULL;

        if (!down_read_trylock(&cpufreq_rwsem))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = per_cpu(cpufreq_cpu_data, cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy)
                up_read(&cpufreq_rwsem);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        if (cpufreq_disabled())
                return;

        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
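
/*
 * Illustrative usage (not part of the original file): every successful
 * cpufreq_cpu_get() takes a reference on the policy kobject and on
 * cpufreq_rwsem, so it must be balanced by cpufreq_cpu_put():
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u runs at %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */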

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{

        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
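
/*
 * Illustrative sketch (hypothetical driver, not part of the original
 * file): drivers flagged CPUFREQ_ASYNC_NOTIFICATION must notify on their
 * own; such a driver would wrap the actual hardware switch in the
 * _begin()/_end() pair, passing the result so a failed transition gets
 * rolled back for the notifiers. foo_set_rate() is a made-up helper.
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = foo_freq_table[index].frequency,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_set_rate(freqs.new);
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */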


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
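
/*
 * The boost attribute defined above is registered against the global
 * cpufreq kobject during driver registration (outside this excerpt), so
 * from userspace it can be toggled with e.g.:
 *
 *	echo 1 > /sys/devices/system/cpu/cpufreq/boost
 */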

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (has_target()) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
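
/*
 * For reference, the last invocation above expands to (illustrative
 * expansion of the show_one() macro):
 *
 *	static ssize_t show_scaling_cur_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */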

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret;                                                        \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
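
/*
 * For a policy spanning CPUs 0-3, cpufreq_show_cpus() renders "0 1 2 3\n"
 * into buf and returns the number of bytes written.
 */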

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);

        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
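
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above pin the global kobject for as long as a file lives under it. A
 * hypothetical global attribute "foo" (with a matching show_foo(), as
 * define_one_global_ro() expects) would be published and retired like
 * this:
 *
 *	define_one_global_ro(foo);
 *
 *	ret = cpufreq_sysfs_create_file(&foo.attr);
 *	...
 *	cpufreq_sysfs_remove_file(&foo.attr);
 */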

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct device *cpu_dev;

                if (j == policy->cpu)
                        continue;

                pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret)
                        break;
        }
        return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (has_target()) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        ret = cpufreq_add_dev_symlink(policy);
        if (ret)
                goto err_out_kobj_put;

        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                gov->name, policy->cpu);
        else
                gov = CPUFREQ_DEFAULT_GOVERNOR;

        new_policy.governor = gov;

        /* Use the default policy if it's valid. */
        if (cpufreq_driver->setpolicy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev)
{
        int ret = 0;
        unsigned long flags;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        up_write(&policy->rwsem);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        policy->governor = NULL;

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_REMOVE_POLICY, policy);

        down_read(&policy->rwsem);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_read(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
                             struct device *cpu_dev)
{
        int ret;

        if (WARN_ON(cpu == policy->cpu))
                return 0;

        /* Move kobject to the new policy->cpu */
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
                return ret;
        }

        down_write(&policy->rwsem);

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        up_write(&policy->rwsem);

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);

        return 0;
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
        bool recover_policy = cpufreq_suspended;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_policy *tpolicy;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }
#endif

        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
                if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

        /*
         * Restore the saved policy when doing light-weight init and fall back
         * to the full init if that fails.
         */
        policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
        if (!policy) {
                recover_policy = false;
                policy = cpufreq_policy_alloc();
                if (!policy)
                        goto nomem_out;
        }

        /*
         * In the resume path, since we restore a saved policy, the assignment
         * to policy->cpu is like an update of the existing policy, rather than
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
        if (recover_policy && cpu != policy->cpu)
                WARN_ON(update_policy_cpu(policy, cpu, dev));
        else
                policy->cpu = cpu;

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq driver must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        /* related cpus should at least contain policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the ones which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
        }

        down_write(&policy->rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto err_get_freq;
                }
        }

        /*
         * Sometimes boot loaders set the CPU frequency to a value outside of
         * the frequency table present with the cpufreq core. In such cases
         * the CPU might be unstable if it has to run on that frequency for a
         * long duration, so it is better to set it to a frequency which is
         * specified in the freq-table. This also makes cpufreq stats
         * inconsistent, as cpufreq-stats would fail to register because the
         * current frequency of the CPU isn't found in the freq-table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next freq which is >= policy->cur ('cur' must be set
         * by now, otherwise we will end up setting freq to the lowest of the
         * table as 'cur' is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        if (!recover_policy) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_add(&policy->policy_list, &cpufreq_policy_list);
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_init_policy(policy);

        if (!recover_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        up_read(&cpufreq_rwsem);

        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
err_get_freq:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
err_set_policy_cpu:
        if (recover_policy) {
                /* Do not leave stale fallback data behind. */
                per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
                cpufreq_policy_put_kobj(policy);
        }
        cpufreq_policy_free(policy);

nomem_out:
        up_read(&cpufreq_rwsem);

        return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        return __cpufreq_add_dev(dev, sif);
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
                                        struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, cpus;
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data, cpu);

        /* Save the policy somewhere when doing a light-weight tear-down */
        if (cpufreq_suspended)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);

        down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
        up_read(&policy->rwsem);

        if (cpu != policy->cpu) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                /* Nominate new CPU */
                int new_cpu = cpumask_any_but(policy->cpus, cpu);
                struct device *cpu_dev = get_cpu_device(new_cpu);

                sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
                ret = update_policy_cpu(policy, new_cpu, cpu_dev);
                if (ret) {
                        if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                              "cpufreq"))
                                pr_err("%s: Failed to restore kobj link to cpu:%d\n",
                                       __func__, cpu_dev->id);
                        return ret;
                }

                if (!cpufreq_suspended)
                        pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                 __func__, new_cpu, cpu);
        } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
                cpufreq_driver->stop_cpu(policy);
        }

        return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
                                       struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, cpus;
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        down_write(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, policy->cpus);
        up_write(&policy->rwsem);

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (has_target()) {
                        ret = __cpufreq_governor(policy,
                                        CPUFREQ_GOV_POLICY_EXIT);
                        if (ret) {
                                pr_err("%s: Failed to exit governor\n",
                                       __func__);
                                return ret;
                        }
                }

                if (!cpufreq_suspended)
                        cpufreq_policy_put_kobj(policy);

                /*
                 * Perform the ->exit() even during light-weight tear-down,
                 * since this is a core component, and is essential for the
                 * subsequent light-weight ->init() to succeed.
                 */
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);

                /* Remove policy from list of active policies */
                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_del(&policy->policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                if (!cpufreq_suspended)
                        cpufreq_policy_free(policy);
        } else if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        per_cpu(cpufreq_cpu_data, cpu) = NULL;
        return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret;

        if (cpu_is_offline(cpu))
                return 0;

        ret = __cpufreq_remove_dev_prepare(dev, sif);

        if (!ret)
                ret = __cpufreq_remove_dev_finish(dev, sif);

        return ret;
}

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - If the actual and the saved CPU frequency differ,
 *      we're in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
1501 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1502                                 unsigned int new_freq)
1503 {
1504         struct cpufreq_policy *policy;
1505         struct cpufreq_freqs freqs;
1506         unsigned long flags;
1507
1508         pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1509                  old_freq, new_freq);
1510
1511         freqs.old = old_freq;
1512         freqs.new = new_freq;
1513
1514         read_lock_irqsave(&cpufreq_driver_lock, flags);
1515         policy = per_cpu(cpufreq_cpu_data, cpu);
1516         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1517
1518         cpufreq_freq_transition_begin(policy, &freqs);
1519         cpufreq_freq_transition_end(policy, &freqs, 0);
1520 }
1521
1522 /**
1523  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1524  * @cpu: CPU number
1525  *
1526  * This is the last known freq, without actually getting it from the driver.
1527  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1528  */
1529 unsigned int cpufreq_quick_get(unsigned int cpu)
1530 {
1531         struct cpufreq_policy *policy;
1532         unsigned int ret_freq = 0;
1533
1534         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1535                 return cpufreq_driver->get(cpu);
1536
1537         policy = cpufreq_cpu_get(cpu);
1538         if (policy) {
1539                 ret_freq = policy->cur;
1540                 cpufreq_cpu_put(policy);
1541         }
1542
1543         return ret_freq;
1544 }
1545 EXPORT_SYMBOL(cpufreq_quick_get);
1546
1547 /**
1548  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1549  * @cpu: CPU number
1550  *
1551  * Just return the max possible frequency for a given CPU.
1552  */
1553 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1554 {
1555         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1556         unsigned int ret_freq = 0;
1557
1558         if (policy) {
1559                 ret_freq = policy->max;
1560                 cpufreq_cpu_put(policy);
1561         }
1562
1563         return ret_freq;
1564 }
1565 EXPORT_SYMBOL(cpufreq_quick_get_max);
1566
1567 static unsigned int __cpufreq_get(unsigned int cpu)
1568 {
1569         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1570         unsigned int ret_freq = 0;
1571
1572         if (!cpufreq_driver->get)
1573                 return ret_freq;
1574
1575         ret_freq = cpufreq_driver->get(cpu);
1576
1577         if (ret_freq && policy->cur &&
1578                 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1579                 /* verify that no discrepancy exists between the actual
1580                    and the saved value */
1581                 if (unlikely(ret_freq != policy->cur)) {
1582                         cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1583                         schedule_work(&policy->update);
1584                 }
1585         }
1586
1587         return ret_freq;
1588 }
1589
1590 /**
1591  * cpufreq_get - get the current CPU frequency (in kHz)
1592  * @cpu: CPU number
1593  *
1594  * Get the current frequency of a CPU, as reported by the hardware driver.
1595  */
1596 unsigned int cpufreq_get(unsigned int cpu)
1597 {
1598         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1599         unsigned int ret_freq = 0;
1600
1601         if (policy) {
1602                 down_read(&policy->rwsem);
1603                 ret_freq = __cpufreq_get(cpu);
1604                 up_read(&policy->rwsem);
1605
1606                 cpufreq_cpu_put(policy);
1607         }
1608
1609         return ret_freq;
1610 }
1611 EXPORT_SYMBOL(cpufreq_get);
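/*
 * Illustrative sketch (not part of this file): how a caller might combine
 * the getters above. cpufreq_quick_get() returns the cached policy->cur
 * without touching the hardware, while cpufreq_get() queries the driver and
 * may trigger the out-of-sync fixup. All names below are hypothetical.
 *
 *	static void example_report_freqs(unsigned int cpu)
 *	{
 *		unsigned int cached = cpufreq_quick_get(cpu);
 *		unsigned int live = cpufreq_get(cpu);
 *
 *		pr_info("cpu%u: cached %u kHz, live %u kHz, max %u kHz\n",
 *			cpu, cached, live, cpufreq_quick_get_max(cpu));
 *	}
 *
 * All three helpers return 0 when no policy exists for the CPU, so callers
 * should treat 0 as "unknown" rather than as a real frequency.
 */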
1612
1613 static struct subsys_interface cpufreq_interface = {
1614         .name           = "cpufreq",
1615         .subsys         = &cpu_subsys,
1616         .add_dev        = cpufreq_add_dev,
1617         .remove_dev     = cpufreq_remove_dev,
1618 };
1619
1620 /*
1621  * Generic suspend handler for platforms that want a specific frequency
1622  * to be configured during suspend.
1623  */
1624 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1625 {
1626         int ret;
1627
1628         if (!policy->suspend_freq) {
1629                 pr_err("%s: suspend_freq can't be zero\n", __func__);
1630                 return -EINVAL;
1631         }
1632
1633         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1634                         policy->suspend_freq);
1635
1636         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1637                         CPUFREQ_RELATION_H);
1638         if (ret)
1639                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1640                                 __func__, policy->suspend_freq, ret);
1641
1642         return ret;
1643 }
1644 EXPORT_SYMBOL(cpufreq_generic_suspend);
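/*
 * Illustrative sketch (not part of this file): a platform driver that needs
 * a fixed frequency across suspend can point its ->suspend callback at
 * cpufreq_generic_suspend() and fill in policy->suspend_freq from its ->init
 * callback. The driver name and the 800 MHz value are hypothetical.
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		// ... normal policy setup ...
 *		policy->suspend_freq = 800000;	// kHz, used across suspend
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.init		= example_cpufreq_init,
 *		.suspend	= cpufreq_generic_suspend,
 *		// .verify, .target_index, etc. omitted for brevity
 *	};
 */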
1645
1646 /**
1647  * cpufreq_suspend() - Suspend CPUFreq governors
1648  *
1649  * Called during system-wide suspend/hibernate cycles to suspend governors,
1650  * as some platforms can't change frequency after this point in the suspend
1651  * cycle: the devices used for changing frequency (e.g. i2c, regulators)
1652  * are themselves suspended shortly afterwards.
1653  */
1654 void cpufreq_suspend(void)
1655 {
1656         struct cpufreq_policy *policy;
1657
1658         if (!cpufreq_driver)
1659                 return;
1660
1661         cpufreq_suspended = true;
1662
1663         if (!has_target())
1664                 return;
1665
1666         pr_debug("%s: Suspending Governors\n", __func__);
1667
1668         list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1669                 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1670                         pr_err("%s: Failed to stop governor for policy: %p\n",
1671                                 __func__, policy);
1672                 else if (cpufreq_driver->suspend
1673                     && cpufreq_driver->suspend(policy))
1674                         pr_err("%s: Failed to suspend driver: %p\n", __func__,
1675                                 policy);
1676         }
1677 }
1678
1679 /**
1680  * cpufreq_resume() - Resume CPUFreq governors
1681  *
1682  * Called during system-wide suspend/hibernate cycles to resume governors
1683  * that were suspended with cpufreq_suspend().
1684  */
1685 void cpufreq_resume(void)
1686 {
1687         struct cpufreq_policy *policy;
1688
1689         if (!cpufreq_driver)
1690                 return;
1691
1692         cpufreq_suspended = false;
1693
1694         if (!has_target())
1695                 return;
1696
1697         pr_debug("%s: Resuming Governors\n", __func__);
1698
1699         list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1700                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1701                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1702                                 policy);
1703                 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1704                     || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1705                         pr_err("%s: Failed to start governor for policy: %p\n",
1706                                 __func__, policy);
1707
1708                 /*
1709                  * Schedule a call to cpufreq_update_policy() for the boot
1710                  * CPU, i.e. the last policy in the list. It will verify that
1711                  * the current freq is in sync with what we believe it to be.
1712                  */
1713                 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1714                         schedule_work(&policy->update);
1715         }
1716 }
1717
1718 /**
1719  *      cpufreq_get_current_driver - return current driver's name
1720  *
1721  *      Return the name string of the currently loaded cpufreq driver
1722  *      or NULL, if none.
1723  */
1724 const char *cpufreq_get_current_driver(void)
1725 {
1726         if (cpufreq_driver)
1727                 return cpufreq_driver->name;
1728
1729         return NULL;
1730 }
1731 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1732
1733 /*********************************************************************
1734  *                     NOTIFIER LISTS INTERFACE                      *
1735  *********************************************************************/
1736
1737 /**
1738  *      cpufreq_register_notifier - register a driver with cpufreq
1739  *      @nb: notifier function to register
1740  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1741  *
1742  *      Add a driver to one of two lists: either a list of drivers that
1743  *      are notified about clock rate changes (once before and once after
1744  *      the transition), or a list of drivers that are notified about
1745  *      changes in cpufreq policy.
1746  *
1747  *      This function may sleep, and has the same return conditions as
1748  *      blocking_notifier_chain_register.
1749  */
1750 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1751 {
1752         int ret;
1753
1754         if (cpufreq_disabled())
1755                 return -EINVAL;
1756
1757         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1758
1759         switch (list) {
1760         case CPUFREQ_TRANSITION_NOTIFIER:
1761                 ret = srcu_notifier_chain_register(
1762                                 &cpufreq_transition_notifier_list, nb);
1763                 break;
1764         case CPUFREQ_POLICY_NOTIFIER:
1765                 ret = blocking_notifier_chain_register(
1766                                 &cpufreq_policy_notifier_list, nb);
1767                 break;
1768         default:
1769                 ret = -EINVAL;
1770         }
1771
1772         return ret;
1773 }
1774 EXPORT_SYMBOL(cpufreq_register_notifier);
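/*
 * Illustrative sketch (not part of this file): registering a transition
 * notifier. The callback runs once with CPUFREQ_PRECHANGE before and once
 * with CPUFREQ_POSTCHANGE after each frequency change, with the data pointer
 * referring to the struct cpufreq_freqs for the transition. Names are
 * hypothetical.
 *
 *	static int example_transition_cb(struct notifier_block *nb,
 *					 unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_info("freq transition: %u -> %u kHz\n",
 *				freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_transition_cb,
 *	};
 *
 *	// module init:
 *	//	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	// module exit:
 *	//	cpufreq_unregister_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */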
1775
1776 /**
1777  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1778  *      @nb: notifier block to be unregistered
1779  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1780  *
1781  *      Remove a driver from the CPU frequency notifier list.
1782  *
1783  *      This function may sleep, and has the same return conditions as
1784  *      blocking_notifier_chain_unregister.
1785  */
1786 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1787 {
1788         int ret;
1789
1790         if (cpufreq_disabled())
1791                 return -EINVAL;
1792
1793         switch (list) {
1794         case CPUFREQ_TRANSITION_NOTIFIER:
1795                 ret = srcu_notifier_chain_unregister(
1796                                 &cpufreq_transition_notifier_list, nb);
1797                 break;
1798         case CPUFREQ_POLICY_NOTIFIER:
1799                 ret = blocking_notifier_chain_unregister(
1800                                 &cpufreq_policy_notifier_list, nb);
1801                 break;
1802         default:
1803                 ret = -EINVAL;
1804         }
1805
1806         return ret;
1807 }
1808 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1809
1810
1811 /*********************************************************************
1812  *                              GOVERNORS                            *
1813  *********************************************************************/
1814
1815 /* Must set freqs->new to intermediate frequency */
1816 static int __target_intermediate(struct cpufreq_policy *policy,
1817                                  struct cpufreq_freqs *freqs, int index)
1818 {
1819         int ret;
1820
1821         freqs->new = cpufreq_driver->get_intermediate(policy, index);
1822
1823         /* We don't need to switch to intermediate freq */
1824         if (!freqs->new)
1825                 return 0;
1826
1827         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1828                  __func__, policy->cpu, freqs->old, freqs->new);
1829
1830         cpufreq_freq_transition_begin(policy, freqs);
1831         ret = cpufreq_driver->target_intermediate(policy, index);
1832         cpufreq_freq_transition_end(policy, freqs, ret);
1833
1834         if (ret)
1835                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1836                        __func__, ret);
1837
1838         return ret;
1839 }
1840
1841 static int __target_index(struct cpufreq_policy *policy,
1842                           struct cpufreq_frequency_table *freq_table, int index)
1843 {
1844         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1845         unsigned int intermediate_freq = 0;
1846         int retval = -EINVAL;
1847         bool notify;
1848
1849         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1850         if (notify) {
1851                 /* Handle switching to intermediate frequency */
1852                 if (cpufreq_driver->get_intermediate) {
1853                         retval = __target_intermediate(policy, &freqs, index);
1854                         if (retval)
1855                                 return retval;
1856
1857                         intermediate_freq = freqs.new;
1858                         /* Set old freq to intermediate */
1859                         if (intermediate_freq)
1860                                 freqs.old = freqs.new;
1861                 }
1862
1863                 freqs.new = freq_table[index].frequency;
1864                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1865                          __func__, policy->cpu, freqs.old, freqs.new);
1866
1867                 cpufreq_freq_transition_begin(policy, &freqs);
1868         }
1869
1870         retval = cpufreq_driver->target_index(policy, index);
1871         if (retval)
1872                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1873                        retval);
1874
1875         if (notify) {
1876                 cpufreq_freq_transition_end(policy, &freqs, retval);
1877
1878                 /*
1879                  * Failed after setting to intermediate freq? The driver
1880                  * should have reverted to the initial frequency and so
1881                  * should we. Check here for intermediate_freq instead of
1882                  * get_intermediate, in case we haven't switched at all.
1883                  */
1884                 if (unlikely(retval && intermediate_freq)) {
1885                         freqs.old = intermediate_freq;
1886                         freqs.new = policy->restore_freq;
1887                         cpufreq_freq_transition_begin(policy, &freqs);
1888                         cpufreq_freq_transition_end(policy, &freqs, 0);
1889                 }
1890         }
1891
1892         return retval;
1893 }
1894
1895 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1896                             unsigned int target_freq,
1897                             unsigned int relation)
1898 {
1899         unsigned int old_target_freq = target_freq;
1900         int retval = -EINVAL;
1901
1902         if (cpufreq_disabled())
1903                 return -ENODEV;
1904
1905         /* Make sure that target_freq is within supported range */
1906         if (target_freq > policy->max)
1907                 target_freq = policy->max;
1908         if (target_freq < policy->min)
1909                 target_freq = policy->min;
1910
1911         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1912                  policy->cpu, target_freq, relation, old_target_freq);
1913
1914         /*
1915          * This might look like a redundant call, as we check it again
1916          * after finding the index. But it is left in intentionally for the
1917          * case where exactly the same frequency is requested again, so that
1918          * we can save a few function calls.
1919          */
1920         if (target_freq == policy->cur)
1921                 return 0;
1922
1923         /* Save last value to restore later on errors */
1924         policy->restore_freq = policy->cur;
1925
1926         if (cpufreq_driver->target)
1927                 retval = cpufreq_driver->target(policy, target_freq, relation);
1928         else if (cpufreq_driver->target_index) {
1929                 struct cpufreq_frequency_table *freq_table;
1930                 int index;
1931
1932                 freq_table = cpufreq_frequency_get_table(policy->cpu);
1933                 if (unlikely(!freq_table)) {
1934                         pr_err("%s: Unable to find freq_table\n", __func__);
1935                         goto out;
1936                 }
1937
1938                 retval = cpufreq_frequency_table_target(policy, freq_table,
1939                                 target_freq, relation, &index);
1940                 if (unlikely(retval)) {
1941                         pr_err("%s: Unable to find matching freq\n", __func__);
1942                         goto out;
1943                 }
1944
1945                 if (freq_table[index].frequency == policy->cur) {
1946                         retval = 0;
1947                         goto out;
1948                 }
1949
1950                 retval = __target_index(policy, freq_table, index);
1951         }
1952
1953 out:
1954         return retval;
1955 }
1956 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1957
1958 int cpufreq_driver_target(struct cpufreq_policy *policy,
1959                           unsigned int target_freq,
1960                           unsigned int relation)
1961 {
1962         int ret = -EINVAL;
1963
1964         down_write(&policy->rwsem);
1965
1966         ret = __cpufreq_driver_target(policy, target_freq, relation);
1967
1968         up_write(&policy->rwsem);
1969
1970         return ret;
1971 }
1972 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
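/*
 * Illustrative sketch (not part of this file): requesting a new frequency.
 * cpufreq_driver_target() takes policy->rwsem itself, so it is the variant
 * for contexts that do not already hold the lock; governor callbacks, which
 * run with the lock held, call __cpufreq_driver_target() instead. Names are
 * hypothetical.
 *
 *	static int example_set_speed(struct cpufreq_policy *policy,
 *				     unsigned int freq)
 *	{
 *		// CPUFREQ_RELATION_L: lowest supported frequency >= freq
 *		return cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 *	}
 */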
1973
1974 /*
1975  * Forward a governor event (e.g. CPUFREQ_GOV_LIMITS) to the policy's governor.
1976  */
1977
1978 static int __cpufreq_governor(struct cpufreq_policy *policy,
1979                                         unsigned int event)
1980 {
1981         int ret;
1982
1983         /* Must only be defined when the default governor is known to have
1984            latency restrictions, e.g. conservative or ondemand.
1985            Kconfig already ensures that this is the case.
1986         */
1987 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1988         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1989 #else
1990         struct cpufreq_governor *gov = NULL;
1991 #endif
1992
1993         /* Don't start any governor operations if we are entering suspend */
1994         if (cpufreq_suspended)
1995                 return 0;
1996
1997         if (policy->governor->max_transition_latency &&
1998             policy->cpuinfo.transition_latency >
1999             policy->governor->max_transition_latency) {
2000                 if (!gov)
2001                         return -EINVAL;
2002                 else {
2003                         pr_warn("%s governor failed: HW transition latency too long, falling back to %s governor\n",
2004                                 policy->governor->name, gov->name);
2005                         policy->governor = gov;
2006                 }
2007         }
2008
2009         if (event == CPUFREQ_GOV_POLICY_INIT)
2010                 if (!try_module_get(policy->governor->owner))
2011                         return -EINVAL;
2012
2013         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2014                  policy->cpu, event);
2015
2016         mutex_lock(&cpufreq_governor_lock);
2017         if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2018             || (!policy->governor_enabled
2019             && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2020                 mutex_unlock(&cpufreq_governor_lock);
2021                 return -EBUSY;
2022         }
2023
2024         if (event == CPUFREQ_GOV_STOP)
2025                 policy->governor_enabled = false;
2026         else if (event == CPUFREQ_GOV_START)
2027                 policy->governor_enabled = true;
2028
2029         mutex_unlock(&cpufreq_governor_lock);
2030
2031         ret = policy->governor->governor(policy, event);
2032
2033         if (!ret) {
2034                 if (event == CPUFREQ_GOV_POLICY_INIT)
2035                         policy->governor->initialized++;
2036                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2037                         policy->governor->initialized--;
2038         } else {
2039                 /* Restore original values */
2040                 mutex_lock(&cpufreq_governor_lock);
2041                 if (event == CPUFREQ_GOV_STOP)
2042                         policy->governor_enabled = true;
2043                 else if (event == CPUFREQ_GOV_START)
2044                         policy->governor_enabled = false;
2045                 mutex_unlock(&cpufreq_governor_lock);
2046         }
2047
2048         if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2049                         ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2050                 module_put(policy->governor->owner);
2051
2052         return ret;
2053 }
2054
2055 int cpufreq_register_governor(struct cpufreq_governor *governor)
2056 {
2057         int err;
2058
2059         if (!governor)
2060                 return -EINVAL;
2061
2062         if (cpufreq_disabled())
2063                 return -ENODEV;
2064
2065         mutex_lock(&cpufreq_governor_mutex);
2066
2067         governor->initialized = 0;
2068         err = -EBUSY;
2069         if (__find_governor(governor->name) == NULL) {
2070                 err = 0;
2071                 list_add(&governor->governor_list, &cpufreq_governor_list);
2072         }
2073
2074         mutex_unlock(&cpufreq_governor_mutex);
2075         return err;
2076 }
2077 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
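/*
 * Illustrative sketch (not part of this file): the skeleton of a minimal
 * governor module built around cpufreq_register_governor(). On GOV_START
 * and GOV_LIMITS it simply pins the policy to its maximum, much like the
 * in-tree performance governor. All names are hypothetical.
 *
 *	static int example_governor_fn(struct cpufreq_policy *policy,
 *				       unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name		= "example",
 *		.governor	= example_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// register from module init, unregister from module exit:
 *	//	cpufreq_register_governor(&example_governor);
 *	//	cpufreq_unregister_governor(&example_governor);
 */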
2078
2079 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2080 {
2081         int cpu;
2082
2083         if (!governor)
2084                 return;
2085
2086         if (cpufreq_disabled())
2087                 return;
2088
2089         for_each_present_cpu(cpu) {
2090                 if (cpu_online(cpu))
2091                         continue;
2092                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2093                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2094         }
2095
2096         mutex_lock(&cpufreq_governor_mutex);
2097         list_del(&governor->governor_list);
2098         mutex_unlock(&cpufreq_governor_mutex);
2099         return;
2100 }
2101 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2102
2103
2104 /*********************************************************************
2105  *                          POLICY INTERFACE                         *
2106  *********************************************************************/
2107
2108 /**
2109  * cpufreq_get_policy - get the current cpufreq_policy
2110  * @policy: struct cpufreq_policy into which the current policy is written
2111  * @cpu: CPU whose current policy shall be read
2112  *
2113  * Reads the current cpufreq policy.
2114  */
2115 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2116 {
2117         struct cpufreq_policy *cpu_policy;
2118         if (!policy)
2119                 return -EINVAL;
2120
2121         cpu_policy = cpufreq_cpu_get(cpu);
2122         if (!cpu_policy)
2123                 return -EINVAL;
2124
2125         memcpy(policy, cpu_policy, sizeof(*policy));
2126
2127         cpufreq_cpu_put(cpu_policy);
2128         return 0;
2129 }
2130 EXPORT_SYMBOL(cpufreq_get_policy);
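/*
 * Illustrative sketch (not part of this file): cpufreq_get_policy() copies a
 * snapshot into a caller-provided structure, so the caller can inspect it
 * without holding any cpufreq locks. Names are hypothetical.
 *
 *	static void example_dump_limits(unsigned int cpu)
 *	{
 *		struct cpufreq_policy pol;
 *
 *		if (cpufreq_get_policy(&pol, cpu))
 *			return;		// no policy for this CPU
 *
 *		pr_info("cpu%u: %u..%u kHz, governor %s\n", cpu, pol.min,
 *			pol.max, pol.governor ? pol.governor->name : "none");
 *	}
 */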
2131
2132 /*
2133  * policy: current policy.
2134  * new_policy: policy to be set.
2135  */
2136 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2137                                 struct cpufreq_policy *new_policy)
2138 {
2139         struct cpufreq_governor *old_gov;
2140         int ret;
2141
2142         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2143                  new_policy->cpu, new_policy->min, new_policy->max);
2144
2145         memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2146
2147         if (new_policy->min > policy->max || new_policy->max < policy->min)
2148                 return -EINVAL;
2149
2150         /* verify the cpu speed can be set within this limit */
2151         ret = cpufreq_driver->verify(new_policy);
2152         if (ret)
2153                 return ret;
2154
2155         /* adjust if necessary - all reasons */
2156         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2157                         CPUFREQ_ADJUST, new_policy);
2158
2159         /* adjust if necessary - hardware incompatibility */
2160         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2161                         CPUFREQ_INCOMPATIBLE, new_policy);
2162
2163         /*
2164          * verify the cpu speed can be set within this limit, which might be
2165          * different from the first one
2166          */
2167         ret = cpufreq_driver->verify(new_policy);
2168         if (ret)
2169                 return ret;
2170
2171         /* notification of the new policy */
2172         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2173                         CPUFREQ_NOTIFY, new_policy);
2174
2175         policy->min = new_policy->min;
2176         policy->max = new_policy->max;
2177
2178         pr_debug("new min and max freqs are %u - %u kHz\n",
2179                  policy->min, policy->max);
2180
2181         if (cpufreq_driver->setpolicy) {
2182                 policy->policy = new_policy->policy;
2183                 pr_debug("setting range\n");
2184                 return cpufreq_driver->setpolicy(new_policy);
2185         }
2186
2187         if (new_policy->governor == policy->governor)
2188                 goto out;
2189
2190         pr_debug("governor switch\n");
2191
2192         /* save old, working values */
2193         old_gov = policy->governor;
2194         /* end old governor */
2195         if (old_gov) {
2196                 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2197                 up_write(&policy->rwsem);
2198                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2199                 down_write(&policy->rwsem);
2200         }
2201
2202         /* start new governor */
2203         policy->governor = new_policy->governor;
2204         if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2205                 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2206                         goto out;
2207
2208                 up_write(&policy->rwsem);
2209                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2210                 down_write(&policy->rwsem);
2211         }
2212
2213         /* new governor failed, so re-start old one */
2214         pr_debug("starting governor %s failed\n", policy->governor->name);
2215         if (old_gov) {
2216                 policy->governor = old_gov;
2217                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2218                 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2219         }
2220
2221         return -EINVAL;
2222
2223  out:
2224         pr_debug("governor: change or update limits\n");
2225         return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2226 }
2227
2228 /**
2229  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
2230  *      @cpu: CPU which shall be re-evaluated
2231  *
2232  *      Useful for policy notifiers which have different requirements
2233  *      at different times.
2234  */
2235 int cpufreq_update_policy(unsigned int cpu)
2236 {
2237         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2238         struct cpufreq_policy new_policy;
2239         int ret;
2240
2241         if (!policy)
2242                 return -ENODEV;
2243
2244         down_write(&policy->rwsem);
2245
2246         pr_debug("updating policy for CPU %u\n", cpu);
2247         memcpy(&new_policy, policy, sizeof(*policy));
2248         new_policy.min = policy->user_policy.min;
2249         new_policy.max = policy->user_policy.max;
2250         new_policy.policy = policy->user_policy.policy;
2251         new_policy.governor = policy->user_policy.governor;
2252
2253         /*
2254          * BIOS might change freq behind our back
2255          * -> ask driver for current freq and notify governors about a change
2256          */
2257         if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2258                 new_policy.cur = cpufreq_driver->get(cpu);
2259                 if (WARN_ON(!new_policy.cur)) {
2260                         ret = -EIO;
2261                         goto unlock;
2262                 }
2263
2264                 if (!policy->cur) {
2265                         pr_debug("Driver did not initialize current freq\n");
2266                         policy->cur = new_policy.cur;
2267                 } else {
2268                         if (policy->cur != new_policy.cur && has_target())
2269                                 cpufreq_out_of_sync(cpu, policy->cur,
2270                                                                 new_policy.cur);
2271                 }
2272         }
2273
2274         ret = cpufreq_set_policy(policy, &new_policy);
2275
2276 unlock:
2277         up_write(&policy->rwsem);
2278
2279         cpufreq_cpu_put(policy);
2280         return ret;
2281 }
2282 EXPORT_SYMBOL(cpufreq_update_policy);
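/*
 * Illustrative sketch (not part of this file): the typical consumer of
 * cpufreq_update_policy() is code that clamps the limits from a
 * CPUFREQ_POLICY_NOTIFIER and then asks the core to re-evaluate, e.g. a
 * thermal driver. cpufreq_verify_within_limits() is assumed from
 * <linux/cpufreq.h>; the 1 GHz cap and all names are hypothetical.
 *
 *	static int example_policy_cb(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct cpufreq_policy *policy = data;
 *
 *		if (event == CPUFREQ_ADJUST)
 *			cpufreq_verify_within_limits(policy, 0, 1000000);
 *		return NOTIFY_OK;
 *	}
 *
 * After registering the block with CPUFREQ_POLICY_NOTIFIER, calling
 * cpufreq_update_policy(cpu) makes the core re-run the notifier chain and
 * apply the (possibly clamped) limits.
 */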
2283
2284 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2285                                         unsigned long action, void *hcpu)
2286 {
2287         unsigned int cpu = (unsigned long)hcpu;
2288         struct device *dev;
2289
2290         dev = get_cpu_device(cpu);
2291         if (dev) {
2292                 switch (action & ~CPU_TASKS_FROZEN) {
2293                 case CPU_ONLINE:
2294                         __cpufreq_add_dev(dev, NULL);
2295                         break;
2296
2297                 case CPU_DOWN_PREPARE:
2298                         __cpufreq_remove_dev_prepare(dev, NULL);
2299                         break;
2300
2301                 case CPU_POST_DEAD:
2302                         __cpufreq_remove_dev_finish(dev, NULL);
2303                         break;
2304
2305                 case CPU_DOWN_FAILED:
2306                         __cpufreq_add_dev(dev, NULL);
2307                         break;
2308                 }
2309         }
2310         return NOTIFY_OK;
2311 }
2312
2313 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2314         .notifier_call = cpufreq_cpu_callback,
2315 };
2316
2317 /*********************************************************************
2318  *               BOOST                                               *
2319  *********************************************************************/
2320 static int cpufreq_boost_set_sw(int state)
2321 {
2322         struct cpufreq_frequency_table *freq_table;
2323         struct cpufreq_policy *policy;
2324         int ret = -EINVAL;
2325
2326         list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2327                 freq_table = cpufreq_frequency_get_table(policy->cpu);
2328                 if (freq_table) {
2329                         ret = cpufreq_frequency_table_cpuinfo(policy,
2330                                                         freq_table);
2331                         if (ret) {
2332                                 pr_err("%s: Policy frequency update failed\n",
2333                                        __func__);
2334                                 break;
2335                         }
2336                         policy->user_policy.max = policy->max;
2337                         __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2338                 }
2339         }
2340
2341         return ret;
2342 }
2343
2344 int cpufreq_boost_trigger_state(int state)
2345 {
2346         unsigned long flags;
2347         int ret = 0;
2348
2349         if (cpufreq_driver->boost_enabled == state)
2350                 return 0;
2351
2352         write_lock_irqsave(&cpufreq_driver_lock, flags);
2353         cpufreq_driver->boost_enabled = state;
2354         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2355
2356         ret = cpufreq_driver->set_boost(state);
2357         if (ret) {
2358                 write_lock_irqsave(&cpufreq_driver_lock, flags);
2359                 cpufreq_driver->boost_enabled = !state;
2360                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2361
2362                 pr_err("%s: Cannot %s BOOST\n",
2363                        __func__, state ? "enable" : "disable");
2364         }
2365
2366         return ret;
2367 }
2368
2369 int cpufreq_boost_supported(void)
2370 {
2371         if (likely(cpufreq_driver))
2372                 return cpufreq_driver->boost_supported;
2373
2374         return 0;
2375 }
2376 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2377
2378 int cpufreq_boost_enabled(void)
2379 {
2380         return cpufreq_driver->boost_enabled;
2381 }
2382 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
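/*
 * Illustrative sketch (not part of this file): a driver opts into the boost
 * framework by setting boost_supported; if it provides no ->set_boost, the
 * core substitutes cpufreq_boost_set_sw() in cpufreq_register_driver(),
 * which re-derives the policy limits from the frequency table. Names are
 * hypothetical.
 *
 *	static struct cpufreq_driver example_boost_driver = {
 *		.name		= "example-boost",
 *		.boost_supported = true,
 *		// .set_boost left NULL: core falls back to cpufreq_boost_set_sw()
 *		// .init/.verify/.target_index omitted for brevity
 *	};
 *
 * Userspace then toggles boost through the global "boost" sysfs attribute,
 * which ends up in cpufreq_boost_trigger_state() above.
 */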
2383
2384 /*********************************************************************
2385  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2386  *********************************************************************/
2387
2388 /**
2389  * cpufreq_register_driver - register a CPU Frequency driver
2390  * @driver_data: A struct cpufreq_driver containing the values
2391  * submitted by the CPU Frequency driver.
2392  *
2393  * Registers a CPU Frequency driver to this core code. This code
2394  * returns zero on success, -EBUSY when another driver got here first
2395  * (and isn't unregistered in the meantime).
2396  *
2397  */
2398 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2399 {
2400         unsigned long flags;
2401         int ret;
2402
2403         if (cpufreq_disabled())
2404                 return -ENODEV;
2405
2406         if (!driver_data || !driver_data->verify || !driver_data->init ||
2407             !(driver_data->setpolicy || driver_data->target_index ||
2408                     driver_data->target) ||
2409              (driver_data->setpolicy && (driver_data->target_index ||
2410                     driver_data->target)) ||
2411              (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2412                 return -EINVAL;
2413
2414         pr_debug("trying to register driver %s\n", driver_data->name);
2415
2416         if (driver_data->setpolicy)
2417                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2418
2419         write_lock_irqsave(&cpufreq_driver_lock, flags);
2420         if (cpufreq_driver) {
2421                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2422                 return -EEXIST;
2423         }
2424         cpufreq_driver = driver_data;
2425         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2426
2427         if (cpufreq_boost_supported()) {
2428                 /*
2429                  * Check if driver provides function to enable boost -
2430                  * if not, use cpufreq_boost_set_sw as default
2431                  */
2432                 if (!cpufreq_driver->set_boost)
2433                         cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2434
2435                 ret = cpufreq_sysfs_create_file(&boost.attr);
2436                 if (ret) {
2437                         pr_err("%s: cannot register global BOOST sysfs file\n",
2438                                __func__);
2439                         goto err_null_driver;
2440                 }
2441         }
2442
2443         ret = subsys_interface_register(&cpufreq_interface);
2444         if (ret)
2445                 goto err_boost_unreg;
2446
2447         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2448                 int i;
2449                 ret = -ENODEV;
2450
2451                 /* check for at least one working CPU */
2452                 for (i = 0; i < nr_cpu_ids; i++)
2453                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2454                                 ret = 0;
2455                                 break;
2456                         }
2457
2458                 /* if all ->init() calls failed, unregister */
2459                 if (ret) {
2460                         pr_debug("no CPU initialized for driver %s\n",
2461                                  driver_data->name);
2462                         goto err_if_unreg;
2463                 }
2464         }
2465
2466         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2467         pr_debug("driver %s up and running\n", driver_data->name);
2468
2469         return 0;
2470 err_if_unreg:
2471         subsys_interface_unregister(&cpufreq_interface);
2472 err_boost_unreg:
2473         if (cpufreq_boost_supported())
2474                 cpufreq_sysfs_remove_file(&boost.attr);
2475 err_null_driver:
2476         write_lock_irqsave(&cpufreq_driver_lock, flags);
2477         cpufreq_driver = NULL;
2478         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2479         return ret;
2480 }
2481 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
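/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * ->target_index driver that satisfies the checks at the top of
 * cpufreq_register_driver(): ->init and ->verify are mandatory, and exactly
 * one of ->setpolicy or ->target/->target_index must be provided. The
 * helpers cpufreq_generic_init() and cpufreq_generic_frequency_table_verify()
 * are assumed from the frequency-table code; the table contents, latency
 * value and all names are hypothetical.
 *
 *	static struct cpufreq_frequency_table example_table[] = {
 *		{ .frequency = 500000 },
 *		{ .frequency = 1000000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int example_target_index(struct cpufreq_policy *policy,
 *					unsigned int index)
 *	{
 *		// program the PLL/divider for example_table[index].frequency
 *		return 0;
 *	}
 *
 *	static int example_init(struct cpufreq_policy *policy)
 *	{
 *		// 100000 ns is a hypothetical transition latency
 *		return cpufreq_generic_init(policy, example_table, 100000);
 *	}
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.init		= example_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *	};
 *
 *	// module_init() body:	return cpufreq_register_driver(&example_driver);
 *	// module_exit() body:	cpufreq_unregister_driver(&example_driver);
 */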
2482
2483 /**
2484  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2485  *
2486  * Unregister the current CPUFreq driver. Only call this if you have
2487  * the right to do so, i.e. if you have succeeded in initialising before!
2488  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2489  * currently not initialised.
2490  */
2491 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2492 {
2493         unsigned long flags;
2494
2495         if (!cpufreq_driver || (driver != cpufreq_driver))
2496                 return -EINVAL;
2497
2498         pr_debug("unregistering driver %s\n", driver->name);
2499
2500         subsys_interface_unregister(&cpufreq_interface);
2501         if (cpufreq_boost_supported())
2502                 cpufreq_sysfs_remove_file(&boost.attr);
2503
2504         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2505
2506         down_write(&cpufreq_rwsem);
2507         write_lock_irqsave(&cpufreq_driver_lock, flags);
2508
2509         cpufreq_driver = NULL;
2510
2511         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2512         up_write(&cpufreq_rwsem);
2513
2514         return 0;
2515 }
2516 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2517
2518 static int __init cpufreq_core_init(void)
2519 {
2520         if (cpufreq_disabled())
2521                 return -ENODEV;
2522
2523         cpufreq_global_kobject = kobject_create();
2524         BUG_ON(!cpufreq_global_kobject);
2525
2526         return 0;
2527 }
2528 core_initcall(cpufreq_core_init);