/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)                                    \
static int lock_policy_rwsem_##mode(int cpu)                            \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
                                                                        \
        return 0;                                                       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)                                  \
static void unlock_policy_rwsem_##mode(int cpu)                         \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
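
/*
 * Editor's note (illustrative, not part of the original source): for
 * mode == read, the macro pair above expands to the following, mapping a
 * CPU to the CPU that owns its policy before taking the shared rwsem:
 *
 *      static int lock_policy_rwsem_read(int cpu)
 *      {
 *              int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 *              BUG_ON(policy_cpu == -1);
 *              down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 *              return 0;
 *      }
 */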

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
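
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a governor can sample idle time twice and derive load over the interval.
 * The variable names below are hypothetical:
 *
 *      u64 prev_wall, prev_idle, cur_wall, cur_idle, busy;
 *
 *      prev_idle = get_cpu_idle_time(cpu, &prev_wall, 0);
 *      (one sampling interval later)
 *      cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);
 *      busy = (cur_wall - prev_wall) - (cur_idle - prev_idle);
 *      load = 100 * busy / (cur_wall - prev_wall);
 */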

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;

        /* get the CPU */
        policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&policy->kobj))
                goto err_out_put_module;

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return policy;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        if (cpufreq_disabled())
                return NULL;

        return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *policy, bool sysfs)
{
        if (!sysfs)
                kobject_put(&policy->kobj);
        module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        if (cpufreq_disabled())
                return;

        __cpufreq_cpu_put(policy, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *policy)
{
        __cpufreq_cpu_put(policy, true);
}
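
/*
 * Illustrative usage (not part of the original source): every successful
 * cpufreq_cpu_get() must be balanced by cpufreq_cpu_put(), since the pair
 * holds both a kobject reference and a module reference:
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *      if (policy) {
 *              pr_info("cpu%u runs at %u kHz\n", cpu, policy->cur);
 *              cpufreq_cpu_put(policy);
 *      }
 */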

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif
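
/*
 * Worked example (editor's illustration, not part of the original source):
 * with a reference loops_per_jiffy of 4,000,000 saved at 800,000 kHz, a
 * transition to 1,600,000 kHz scales the delay calibration linearly:
 *
 *      loops_per_jiffy = cpufreq_scale(4000000, 800000, 1600000);
 *
 * which yields 8,000,000, since busy-wait loops complete twice as fast,
 * so twice as many fit into one jiffy.
 */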

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                if (WARN(policy->transition_ongoing ==
                                        cpumask_weight(policy->cpus),
                                "In middle of another frequency transition\n"))
                        return;

                policy->transition_ongoing++;

                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                if (WARN(!policy->transition_ongoing,
                                "No frequency transition in progress\n"))
                        return;

                policy->transition_ongoing--;

                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
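
/*
 * Illustrative driver-side sequence (editor's sketch, not part of the
 * original source; "target" is a hypothetical variable): a ->target()
 * implementation brackets the actual hardware change with the two
 * notifications:
 *
 *      struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *
 *      cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *      (program the hardware here)
 *      cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */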


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}
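
/*
 * Example (editor's note): for a ->target() driver, writing "ondemand" to
 * scaling_governor ends up here; if that governor is not yet registered,
 * the core drops the mutex, tries request_module("cpufreq_ondemand"), and
 * then searches the governor list again.
 */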

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
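
/*
 * Illustrative expansion (editor's note, not part of the original source):
 * show_one(scaling_min_freq, min) above becomes:
 *
 *      static ssize_t show_scaling_min_freq
 *      (struct cpufreq_policy *policy, char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->min);
 *      }
 */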

static int __cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned int ret;                                               \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
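
/*
 * Illustrative usage (editor's note): the store_one() instances above
 * implement writes such as
 *
 *      # echo 1600000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
 *
 * by snapshotting the current policy, patching the one field, and applying
 * the result through __cpufreq_set_policy().
 */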

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /*
         * Do not use cpufreq_set_policy here or the user_policy.max
         * will be wrongly overridden
         */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
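
/*
 * Example output (editor's note): for a policy spanning CPUs 0-3,
 * cpufreq_show_cpus() renders "0 1 2 3\n" into the sysfs buffer.
 */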

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};
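
/*
 * Note (editor's illustration): ktype_cpufreq ties each policy's kobject
 * to the per-CPU sysfs directory, typically
 * /sys/devices/system/cpu/cpuN/cpufreq/, so the show()/store() wrappers
 * above service all attribute reads and writes in that directory.
 */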

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct device *cpu_dev;

                if (j == policy->cpu)
                        continue;

                pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret)
                        break;
        }
        return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        ret = cpufreq_add_dev_symlink(policy);
        if (ret)
                goto err_out_kobj_put;

        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev,
                                  bool frozen)
{
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;

        if (has_target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

        lock_policy_rwsem_write(policy->cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        unlock_policy_rwsem_write(policy->cpu);

        if (has_target) {
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }

        /* Don't touch sysfs links during light-weight init */
        if (!frozen)
                ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

        return ret;
}
#endif
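
/*
 * Note (editor's illustration): cpufreq_add_policy_cpu() is the "link a
 * CPU into an existing policy" path, used when a hot-added CPU shares a
 * frequency domain with an already-managed sibling; the governor is
 * stopped while the masks and per-CPU pointers are updated, then
 * restarted.
 */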

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                             bool frozen)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_governor *gov;
        int sibling;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_online_cpu(sibling) {
                struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
                if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        return cpufreq_add_policy_cpu(cp, cpu, dev, frozen);
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

        if (!try_module_get(cpufreq_driver->owner)) {
                ret = -EINVAL;
                goto module_out;
        }

        if (frozen)
                /* Restore the saved policy when doing light-weight init */
                policy = cpufreq_policy_restore(cpu);
        else
                policy = cpufreq_policy_alloc();

        if (!policy)
                goto nomem_out;

        policy->cpu = cpu;
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* Initially set CPU itself as the policy_cpu */
        per_cpu(cpufreq_policy_cpu, cpu) = cpu;

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        /* related cpus should at least include policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the ones which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
#endif

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!frozen) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
        }

        cpufreq_init_policy(policy);

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        module_put(cpufreq_driver->owner);
        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = NULL;
                if (j != cpu)
                        per_cpu(cpufreq_policy_cpu, j) = -1;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_set_policy_cpu:
        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        cpufreq_policy_free(policy);
nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        return __cpufreq_add_dev(dev, sif, false);
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int j;

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
                                           unsigned int old_cpu, bool frozen)
{
        struct device *cpu_dev;
        unsigned long flags;
        int ret;

        /* first sibling now owns the new sysfs dir */
        cpu_dev = get_cpu_device(cpumask_first(policy->cpus));

        /* Don't touch sysfs files during light-weight tear-down */
        if (frozen)
                return cpu_dev->id;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d", __func__, ret);

                WARN_ON(lock_policy_rwsem_write(old_cpu));
                cpumask_set_cpu(old_cpu, policy->cpus);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                per_cpu(cpufreq_cpu_data, old_cpu) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                unlock_policy_rwsem_write(old_cpu);

                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");

                return -EINVAL;
        }

        return cpu_dev->id;
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev,
                                struct subsys_interface *sif, bool frozen)
{
        unsigned int cpu = dev->id, cpus;
        int new_cpu;
        unsigned long flags;
        struct cpufreq_policy *policy;
        struct kobject *kobj;
        struct completion *cmp;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data, cpu);
        per_cpu(cpufreq_cpu_data, cpu) = NULL;

        /* Save the policy somewhere when doing a light-weight tear-down */
        if (frozen)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (cpufreq_driver->target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);
#endif

        WARN_ON(lock_policy_rwsem_write(cpu));
        cpus = cpumask_weight(policy->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, policy->cpus);
        unlock_policy_rwsem_write(cpu);

        if (cpu != policy->cpu && !frozen) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {

                new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
                if (new_cpu >= 0) {
                        WARN_ON(lock_policy_rwsem_write(cpu));
                        update_policy_cpu(policy, new_cpu);
                        unlock_policy_rwsem_write(cpu);

                        if (!frozen) {
                                pr_debug("%s: policy Kobject moved to cpu: %d "
                                         "from: %d\n",__func__, new_cpu, cpu);
                        }
                }
        }

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (cpufreq_driver->target)
                        __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);

                if (!frozen) {
                        lock_policy_rwsem_read(cpu);
                        kobj = &policy->kobj;
                        cmp = &policy->kobj_unregister;
                        unlock_policy_rwsem_read(cpu);
                        kobject_put(kobj);

                        /*
                         * We need to make sure that the underlying kobj is
                         * actually not referenced anymore by anybody before we
                         * proceed with unloading.
                         */
                        pr_debug("waiting for dropping of refcount\n");
                        wait_for_completion(cmp);
                        pr_debug("wait complete\n");
                }

                /*
                 * Perform the ->exit() even during light-weight tear-down,
                 * since this is a core component, and is essential for the
                 * subsequent light-weight ->init() to succeed.
                 */
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);

                if (!frozen)
                        cpufreq_policy_free(policy);
        } else {
                if (cpufreq_driver->target) {
                        __cpufreq_governor(policy, CPUFREQ_GOV_START);
                        __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
                }
        }

        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        return 0;
}

static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int retval;

        if (cpu_is_offline(cpu))
                return 0;

        retval = __cpufreq_remove_dev(dev, sif, false);
        return retval;
}

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 *      in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
        unsigned long flags;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
               "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

        freqs.old = old_freq;
        freqs.new = new_freq;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
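
/*
 * Example (editor's sketch, not part of the original source):
 *
 *      unsigned int khz = cpufreq_quick_get(0);
 *      if (khz)
 *              pr_info("cpu0 last known frequency: %u kHz\n", khz);
 *
 * Unlike cpufreq_get(), this does not query the hardware unless the
 * driver uses ->setpolicy and provides ->get.
 */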

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                                        saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        unsigned int ret_freq = 0;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                goto out;

        if (unlikely(lock_policy_rwsem_read(cpu)))
                goto out_policy;

        ret_freq = __cpufreq_get(cpu);

        unlock_policy_rwsem_read(cpu);

out_policy:
        cpufreq_cpu_put(policy);
out:
        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};

/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *policy;

        pr_debug("suspending cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return 0;

        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(policy);
                if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", policy->cpu);
        }

        cpufreq_cpu_put(policy);
        return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *      2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
 *          restored. It will verify that the current freq is in sync with
 *          what we believe it to be. This is a bit later than when it
 *          should be, but nonetheless it's better than calling
 *          cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *policy;

        pr_debug("resuming cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return;

        if (cpufreq_driver->resume) {
                ret = cpufreq_driver->resume(policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
                                        "step on CPU %u\n", policy->cpu);
                        goto fail;
                }
        }

        schedule_work(&policy->update);

fail:
        cpufreq_cpu_put(policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
        .suspend        = cpufreq_bp_suspend,
        .resume         = cpufreq_bp_resume,
};
1519
1520 /**
1521  *      cpufreq_get_current_driver - return current driver's name
1522  *
1523  *      Return the name string of the currently loaded cpufreq driver
1524  *      or NULL, if none.
1525  */
1526 const char *cpufreq_get_current_driver(void)
1527 {
1528         if (cpufreq_driver)
1529                 return cpufreq_driver->name;
1530
1531         return NULL;
1532 }
1533 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1534
1535 /*********************************************************************
1536  *                     NOTIFIER LISTS INTERFACE                      *
1537  *********************************************************************/
1538
1539 /**
1540  *      cpufreq_register_notifier - register a driver with cpufreq
1541  *      @nb: notifier function to register
1542  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1543  *
1544  *      Add a driver to one of two lists: either a list of drivers that
1545  *      are notified about clock rate changes (once before and once after
1546  *      the transition), or a list of drivers that are notified about
1547  *      changes in cpufreq policy.
1548  *
1549  *      This function may sleep, and has the same return conditions as
1550  *      blocking_notifier_chain_register.
1551  */
1552 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1553 {
1554         int ret;
1555
1556         if (cpufreq_disabled())
1557                 return -EINVAL;
1558
1559         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1560
1561         switch (list) {
1562         case CPUFREQ_TRANSITION_NOTIFIER:
1563                 ret = srcu_notifier_chain_register(
1564                                 &cpufreq_transition_notifier_list, nb);
1565                 break;
1566         case CPUFREQ_POLICY_NOTIFIER:
1567                 ret = blocking_notifier_chain_register(
1568                                 &cpufreq_policy_notifier_list, nb);
1569                 break;
1570         default:
1571                 ret = -EINVAL;
1572         }
1573
1574         return ret;
1575 }
1576 EXPORT_SYMBOL(cpufreq_register_notifier);
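/*
 * Example (an illustrative sketch, not part of the original file): a
 * hypothetical client module watching frequency transitions. The callback is
 * invoked once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE around
 * each transition; all "sample_*" names are made up.
 *
 *	static int sample_transition(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n",
 *				freqs->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block sample_nb = {
 *		.notifier_call = sample_transition,
 *	};
 *
 *	ret = cpufreq_register_notifier(&sample_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * The matching cpufreq_unregister_notifier() call below would sit in the
 * client's module exit path.
 */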
1577
1578 /**
1579  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1580  *      @nb: notifier block to be unregistered
1581  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1582  *
1583  *      Remove a driver from the CPU frequency notifier list.
1584  *
1585  *      This function may sleep, and has the same return conditions as
1586  *      blocking_notifier_chain_unregister.
1587  */
1588 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1589 {
1590         int ret;
1591
1592         if (cpufreq_disabled())
1593                 return -EINVAL;
1594
1595         switch (list) {
1596         case CPUFREQ_TRANSITION_NOTIFIER:
1597                 ret = srcu_notifier_chain_unregister(
1598                                 &cpufreq_transition_notifier_list, nb);
1599                 break;
1600         case CPUFREQ_POLICY_NOTIFIER:
1601                 ret = blocking_notifier_chain_unregister(
1602                                 &cpufreq_policy_notifier_list, nb);
1603                 break;
1604         default:
1605                 ret = -EINVAL;
1606         }
1607
1608         return ret;
1609 }
1610 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1611
1612
1613 /*********************************************************************
1614  *                              GOVERNORS                            *
1615  *********************************************************************/
1616
1617 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1618                             unsigned int target_freq,
1619                             unsigned int relation)
1620 {
1621         int retval = -EINVAL;
1622         unsigned int old_target_freq = target_freq;
1623
1624         if (cpufreq_disabled())
1625                 return -ENODEV;
1626         if (policy->transition_ongoing)
1627                 return -EBUSY;
1628
1629         /* Make sure that target_freq is within supported range */
1630         if (target_freq > policy->max)
1631                 target_freq = policy->max;
1632         if (target_freq < policy->min)
1633                 target_freq = policy->min;
1634
1635         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1636                         policy->cpu, target_freq, relation, old_target_freq);
1637
1638         if (target_freq == policy->cur)
1639                 return 0;
1640
1641         if (cpufreq_driver->target)
1642                 retval = cpufreq_driver->target(policy, target_freq, relation);
1643
1644         return retval;
1645 }
1646 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1647
1648 int cpufreq_driver_target(struct cpufreq_policy *policy,
1649                           unsigned int target_freq,
1650                           unsigned int relation)
1651 {
1652         int ret = -EINVAL;
1653
1654         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1655                 goto fail;
1656
1657         ret = __cpufreq_driver_target(policy, target_freq, relation);
1658
1659         unlock_policy_rwsem_write(policy->cpu);
1660
1661 fail:
1662         return ret;
1663 }
1664 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
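/*
 * Example (an illustrative sketch, not part of the original file): how a
 * caller typically requests a new frequency. CPUFREQ_RELATION_L selects the
 * lowest frequency at or above the target, CPUFREQ_RELATION_H the highest at
 * or below it; the locked wrapper above is for callers that do not already
 * hold the policy rwsem.
 *
 *	ret = cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 */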
1665
1666 /*
1667  * __cpufreq_governor - pass an event (e.g. CPUFREQ_GOV_LIMITS) to the governor
1668  */
1669
1670 static int __cpufreq_governor(struct cpufreq_policy *policy,
1671                                         unsigned int event)
1672 {
1673         int ret;
1674
1675         /* A fallback governor must only be defined when the default
1676            governor is known to have latency restrictions (e.g. conservative
1677            or ondemand); Kconfig already ensures that this is the case.
1678         */
1679 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1680         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1681 #else
1682         struct cpufreq_governor *gov = NULL;
1683 #endif
1684
1685         if (policy->governor->max_transition_latency &&
1686             policy->cpuinfo.transition_latency >
1687             policy->governor->max_transition_latency) {
1688                 if (!gov)
1689                         return -EINVAL;
1690
1691                 pr_warn("%s governor failed: HW transition latency too long, falling back to %s governor\n",
1692                         policy->governor->name, gov->name);
1693                 policy->governor = gov;
1698         }
1699
1700         if (!try_module_get(policy->governor->owner))
1701                 return -EINVAL;
1702
1703         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1704                                                 policy->cpu, event);
1705
1706         mutex_lock(&cpufreq_governor_lock);
1707         if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1708             (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1709                 mutex_unlock(&cpufreq_governor_lock);
1710                 return -EBUSY;
1711         }
1712
1713         if (event == CPUFREQ_GOV_STOP)
1714                 policy->governor_enabled = false;
1715         else if (event == CPUFREQ_GOV_START)
1716                 policy->governor_enabled = true;
1717
1718         mutex_unlock(&cpufreq_governor_lock);
1719
1720         ret = policy->governor->governor(policy, event);
1721
1722         if (!ret) {
1723                 if (event == CPUFREQ_GOV_POLICY_INIT)
1724                         policy->governor->initialized++;
1725                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1726                         policy->governor->initialized--;
1727         } else {
1728                 /* Restore original values */
1729                 mutex_lock(&cpufreq_governor_lock);
1730                 if (event == CPUFREQ_GOV_STOP)
1731                         policy->governor_enabled = true;
1732                 else if (event == CPUFREQ_GOV_START)
1733                         policy->governor_enabled = false;
1734                 mutex_unlock(&cpufreq_governor_lock);
1735         }
1736
1737         /* The reference taken above is kept while the governor is running:
1738            a successful GOV_START holds it, a successful GOV_STOP drops it. */
1739         if ((event != CPUFREQ_GOV_START) || ret)
1740                 module_put(policy->governor->owner);
1741         if ((event == CPUFREQ_GOV_STOP) && !ret)
1742                 module_put(policy->governor->owner);
1743
1744         return ret;
1745 }
1746
1747 int cpufreq_register_governor(struct cpufreq_governor *governor)
1748 {
1749         int err;
1750
1751         if (!governor)
1752                 return -EINVAL;
1753
1754         if (cpufreq_disabled())
1755                 return -ENODEV;
1756
1757         mutex_lock(&cpufreq_governor_mutex);
1758
1759         governor->initialized = 0;
1760         err = -EBUSY;
1761         if (__find_governor(governor->name) == NULL) {
1762                 err = 0;
1763                 list_add(&governor->governor_list, &cpufreq_governor_list);
1764         }
1765
1766         mutex_unlock(&cpufreq_governor_mutex);
1767         return err;
1768 }
1769 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
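/*
 * Example (an illustrative sketch, not part of the original file): a minimal
 * governor that pins each policy to its maximum frequency, in the style of
 * cpufreq_gov_performance. All "sample" names are hypothetical.
 *
 *	static int sample_governor(struct cpufreq_policy *policy,
 *				   unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor cpufreq_gov_sample = {
 *		.name		= "sample",
 *		.governor	= sample_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ret = cpufreq_register_governor(&cpufreq_gov_sample);
 */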
1770
1771 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1772 {
1773 #ifdef CONFIG_HOTPLUG_CPU
1774         int cpu;
1775 #endif
1776
1777         if (!governor)
1778                 return;
1779
1780         if (cpufreq_disabled())
1781                 return;
1782
1783 #ifdef CONFIG_HOTPLUG_CPU
1784         for_each_present_cpu(cpu) {
1785                 if (cpu_online(cpu))
1786                         continue;
1787                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1788                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1789         }
1790 #endif
1791
1792         mutex_lock(&cpufreq_governor_mutex);
1793         list_del(&governor->governor_list);
1794         mutex_unlock(&cpufreq_governor_mutex);
1795         return;
1796 }
1797 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1798
1799
1800 /*********************************************************************
1801  *                          POLICY INTERFACE                         *
1802  *********************************************************************/
1803
1804 /**
1805  * cpufreq_get_policy - get the current cpufreq_policy
1806  * @policy: destination for a copy of the current cpufreq policy
1807  * @cpu: CPU whose policy shall be read
1808  *
1809  * Reads the current cpufreq policy.
1810  */
1811 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1812 {
1813         struct cpufreq_policy *cpu_policy;
1814         if (!policy)
1815                 return -EINVAL;
1816
1817         cpu_policy = cpufreq_cpu_get(cpu);
1818         if (!cpu_policy)
1819                 return -EINVAL;
1820
1821         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1822
1823         cpufreq_cpu_put(cpu_policy);
1824         return 0;
1825 }
1826 EXPORT_SYMBOL(cpufreq_get_policy);
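/*
 * Example (an illustrative sketch, not part of the original file): taking a
 * snapshot of CPU0's policy. The copy is private to the caller, so no
 * further locking is needed while inspecting it.
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, 0))
 *		pr_info("cpu0: %u - %u kHz (governor %s)\n", pol.min, pol.max,
 *			pol.governor ? pol.governor->name : "none");
 */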
1827
1828 /*
1829  * policy     : current policy.
1830  * new_policy : policy to be set.
1831  */
1832 static int __cpufreq_set_policy(struct cpufreq_policy *policy,
1833                                 struct cpufreq_policy *new_policy)
1834 {
1835         int ret = 0, failed = 1;
1836
1837         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1838                 new_policy->min, new_policy->max);
1839
1840         memcpy(&new_policy->cpuinfo, &policy->cpuinfo,
1841                                 sizeof(struct cpufreq_cpuinfo));
1842
1843         if (new_policy->min > policy->max || new_policy->max < policy->min) {
1844                 ret = -EINVAL;
1845                 goto error_out;
1846         }
1847
1848         /* verify the cpu speed can be set within this limit */
1849         ret = cpufreq_driver->verify(new_policy);
1850         if (ret)
1851                 goto error_out;
1852
1853         /* adjust if necessary - all reasons */
1854         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1855                         CPUFREQ_ADJUST, new_policy);
1856
1857         /* adjust if necessary - hardware incompatibility */
1858         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1859                         CPUFREQ_INCOMPATIBLE, new_policy);
1860
1861         /*
1862          * verify the cpu speed can be set within this limit, which might be
1863          * different from the first one
1864          */
1865         ret = cpufreq_driver->verify(new_policy);
1866         if (ret)
1867                 goto error_out;
1868
1869         /* notification of the new policy */
1870         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1871                         CPUFREQ_NOTIFY, new_policy);
1872
1873         policy->min = new_policy->min;
1874         policy->max = new_policy->max;
1875
1876         pr_debug("new min and max freqs are %u - %u kHz\n",
1877                                         policy->min, policy->max);
1878
1879         if (cpufreq_driver->setpolicy) {
1880                 policy->policy = new_policy->policy;
1881                 pr_debug("setting range\n");
1882                 ret = cpufreq_driver->setpolicy(new_policy);
1883         } else {
1884                 if (new_policy->governor != policy->governor) {
1885                         /* save old, working values */
1886                         struct cpufreq_governor *old_gov = policy->governor;
1887
1888                         pr_debug("governor switch\n");
1889
1890                         /* end old governor */
1891                         if (policy->governor) {
1892                                 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1893                                 unlock_policy_rwsem_write(new_policy->cpu);
1894                                 __cpufreq_governor(policy,
1895                                                 CPUFREQ_GOV_POLICY_EXIT);
1896                                 lock_policy_rwsem_write(new_policy->cpu);
1897                         }
1898
1899                         /* start new governor */
1900                         policy->governor = new_policy->governor;
1901                         if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1902                                 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1903                                         failed = 0;
1904                                 } else {
1905                                         unlock_policy_rwsem_write(new_policy->cpu);
1906                                         __cpufreq_governor(policy,
1907                                                         CPUFREQ_GOV_POLICY_EXIT);
1908                                         lock_policy_rwsem_write(new_policy->cpu);
1909                                 }
1910                         }
1911
1912                         if (failed) {
1913                                 /* new governor failed, so re-start old one */
1914                                 pr_debug("starting governor %s failed\n",
1915                                                         policy->governor->name);
1916                                 if (old_gov) {
1917                                         policy->governor = old_gov;
1918                                         __cpufreq_governor(policy,
1919                                                         CPUFREQ_GOV_POLICY_INIT);
1920                                         __cpufreq_governor(policy,
1921                                                            CPUFREQ_GOV_START);
1922                                 }
1923                                 ret = -EINVAL;
1924                                 goto error_out;
1925                         }
1926                         /* might be a policy change, too, so fall through */
1927                 }
1928                 pr_debug("governor: change or update limits\n");
1929                 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1930         }
1931
1932 error_out:
1933         return ret;
1934 }
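/*
 * Example (an illustrative sketch, not part of the original file): a policy
 * notifier registered with CPUFREQ_POLICY_NOTIFIER can clamp the limits
 * during the CPUFREQ_ADJUST pass above, e.g. for thermal reasons. The
 * 1200000 kHz cap and the "sample" name are hypothetical.
 *
 *	static int sample_policy_cb(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct cpufreq_policy *policy = data;
 *
 *		if (event == CPUFREQ_ADJUST)
 *			cpufreq_verify_within_limits(policy, 0, 1200000);
 *		return NOTIFY_OK;
 *	}
 */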
1935
1936 /**
1937  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1938  *      @cpu: CPU which shall be re-evaluated
1939  *
1940  *      Useful for policy notifiers whose constraints may change
1941  *      over time.
1942  */
1943 int cpufreq_update_policy(unsigned int cpu)
1944 {
1945         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1946         struct cpufreq_policy new_policy;
1947         int ret;
1948
1949         if (!policy) {
1950                 ret = -ENODEV;
1951                 goto no_policy;
1952         }
1953
1954         if (unlikely(lock_policy_rwsem_write(cpu))) {
1955                 ret = -EINVAL;
1956                 goto fail;
1957         }
1958
1959         pr_debug("updating policy for CPU %u\n", cpu);
1960         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
1961         new_policy.min = policy->user_policy.min;
1962         new_policy.max = policy->user_policy.max;
1963         new_policy.policy = policy->user_policy.policy;
1964         new_policy.governor = policy->user_policy.governor;
1965
1966         /*
1967          * BIOS might change freq behind our back
1968          * -> ask driver for current freq and notify governors about a change
1969          */
1970         if (cpufreq_driver->get) {
1971                 new_policy.cur = cpufreq_driver->get(cpu);
1972                 if (!policy->cur) {
1973                         pr_debug("Driver did not initialize current freq\n");
1974                         policy->cur = new_policy.cur;
1975                 } else {
1976                         if (policy->cur != new_policy.cur && cpufreq_driver->target)
1977                                 cpufreq_out_of_sync(cpu, policy->cur,
1978                                                                 new_policy.cur);
1979                 }
1980         }
1981
1982         ret = __cpufreq_set_policy(policy, &new_policy);
1983
1984         unlock_policy_rwsem_write(cpu);
1985
1986 fail:
1987         cpufreq_cpu_put(policy);
1988 no_policy:
1989         return ret;
1990 }
1991 EXPORT_SYMBOL(cpufreq_update_policy);
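/*
 * Example (an illustrative sketch): a notifier such as sample_policy_cb()
 * above would call this after its constraint changes, so the ADJUST chain is
 * re-run and the new limit takes effect:
 *
 *	for_each_online_cpu(cpu)
 *		cpufreq_update_policy(cpu);
 */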
1992
1993 static int cpufreq_cpu_callback(struct notifier_block *nfb,
1994                                         unsigned long action, void *hcpu)
1995 {
1996         unsigned int cpu = (unsigned long)hcpu;
1997         struct device *dev;
1998         bool frozen = false;
1999
2000         dev = get_cpu_device(cpu);
2001         if (dev) {
2002
2003                 if (action & CPU_TASKS_FROZEN)
2004                         frozen = true;
2005
2006                 switch (action & ~CPU_TASKS_FROZEN) {
2007                 case CPU_ONLINE:
2008                         __cpufreq_add_dev(dev, NULL, frozen);
2009                         cpufreq_update_policy(cpu);
2010                         break;
2011
2012                 case CPU_DOWN_PREPARE:
2013                         __cpufreq_remove_dev(dev, NULL, frozen);
2014                         break;
2015
2016                 case CPU_DOWN_FAILED:
2017                         __cpufreq_add_dev(dev, NULL, frozen);
2018                         break;
2019                 }
2020         }
2021         return NOTIFY_OK;
2022 }
2023
2024 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2025         .notifier_call = cpufreq_cpu_callback,
2026 };
2027
2028 /*********************************************************************
2029  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2030  *********************************************************************/
2031
2032 /**
2033  * cpufreq_register_driver - register a CPU Frequency driver
2034  * @driver_data: A struct cpufreq_driver containing the values
2035  * submitted by the CPU Frequency driver.
2036  *
2037  * Registers a CPU Frequency driver with this core code. Returns zero on
2038  * success, or -EBUSY if another driver was registered first (and has not
2039  * been unregistered in the meantime).
2040  *
2041  */
2042 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2043 {
2044         unsigned long flags;
2045         int ret;
2046
2047         if (cpufreq_disabled())
2048                 return -ENODEV;
2049
2050         if (!driver_data || !driver_data->verify || !driver_data->init ||
2051             ((!driver_data->setpolicy) && (!driver_data->target)))
2052                 return -EINVAL;
2053
2054         pr_debug("trying to register driver %s\n", driver_data->name);
2055
2056         if (driver_data->setpolicy)
2057                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2058
2059         write_lock_irqsave(&cpufreq_driver_lock, flags);
2060         if (cpufreq_driver) {
2061                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2062                 return -EBUSY;
2063         }
2064         cpufreq_driver = driver_data;
2065         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2066
2067         ret = subsys_interface_register(&cpufreq_interface);
2068         if (ret)
2069                 goto err_null_driver;
2070
2071         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2072                 int i;
2073                 ret = -ENODEV;
2074
2075                 /* check for at least one working CPU */
2076                 for (i = 0; i < nr_cpu_ids; i++)
2077                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2078                                 ret = 0;
2079                                 break;
2080                         }
2081
2082                 /* if all ->init() calls failed, unregister */
2083                 if (ret) {
2084                         pr_debug("no CPU initialized for driver %s\n",
2085                                                         driver_data->name);
2086                         goto err_if_unreg;
2087                 }
2088         }
2089
2090         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2091         pr_debug("driver %s up and running\n", driver_data->name);
2092
2093         return 0;
2094 err_if_unreg:
2095         subsys_interface_unregister(&cpufreq_interface);
2096 err_null_driver:
2097         write_lock_irqsave(&cpufreq_driver_lock, flags);
2098         cpufreq_driver = NULL;
2099         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2100         return ret;
2101 }
2102 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
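/*
 * Example (an illustrative sketch, not part of the original file): the
 * skeleton of a minimal ->target style driver. The "sample_*" callbacks are
 * hypothetical; a real driver fills in policy->cpuinfo in ->init and performs
 * the transition (with PRECHANGE/POSTCHANGE notification) in ->target.
 *
 *	static int sample_verify(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
 *					     policy->cpuinfo.max_freq);
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver sample_driver = {
 *		.name	= "sample",
 *		.init	= sample_init,
 *		.verify	= sample_verify,
 *		.target	= sample_target,
 *	};
 *
 *	ret = cpufreq_register_driver(&sample_driver);
 */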
2103
2104 /**
2105  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2106  *
2107  * Unregister the current CPUFreq driver. Only call this if you have
2108  * registered the driver successfully before! Returns zero on success,
2109  * and -EINVAL if no driver is registered or @driver does not match
2110  * the registered one.
2111  */
2112 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2113 {
2114         unsigned long flags;
2115
2116         if (!cpufreq_driver || (driver != cpufreq_driver))
2117                 return -EINVAL;
2118
2119         pr_debug("unregistering driver %s\n", driver->name);
2120
2121         subsys_interface_unregister(&cpufreq_interface);
2122         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2123
2124         write_lock_irqsave(&cpufreq_driver_lock, flags);
2125         cpufreq_driver = NULL;
2126         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2127
2128         return 0;
2129 }
2130 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2131
2132 static int __init cpufreq_core_init(void)
2133 {
2134         int cpu;
2135
2136         if (cpufreq_disabled())
2137                 return -ENODEV;
2138
2139         for_each_possible_cpu(cpu) {
2140                 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2141                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2142         }
2143
2144         cpufreq_global_kobject = kobject_create();
2145         BUG_ON(!cpufreq_global_kobject);
2146         register_syscore_ops(&cpufreq_syscore_ops);
2147
2148         return 0;
2149 }
2150 core_initcall(cpufreq_core_init);