cpufreq: Preserve policy structure across suspend/resume
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)                                    \
static int lock_policy_rwsem_##mode(int cpu)                            \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
                                                                        \
        return 0;                                                       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)                                  \
static void unlock_policy_rwsem_##mode(int cpu)                         \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
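
/*
 * For reference (added note, not in the original source): the two macro
 * pairs above expand to four helpers,
 *
 *     static int lock_policy_rwsem_read(int cpu);
 *     static int lock_policy_rwsem_write(int cpu);
 *     static void unlock_policy_rwsem_read(int cpu);
 *     static void unlock_policy_rwsem_write(int cpu);
 *
 * each of which resolves @cpu to its owning policy_cpu and takes/releases
 * that CPU's cpu_policy_rwsem in the named mode. cpufreq_get() below shows
 * the canonical read-side pattern.
 */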

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
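
/*
 * Illustrative sketch (not part of the original file): how a governor-style
 * sampler might consume get_cpu_idle_time(). Deltas between two such samples
 * give the busy fraction over the interval; the "example_" name is
 * hypothetical.
 */
static void __maybe_unused example_sample_idle(unsigned int cpu)
{
        u64 wall, idle;

        /* io_busy == 0: io-wait time is treated as idle time */
        idle = get_cpu_idle_time(cpu, &wall, 0);
        pr_debug("cpu%u: idle %llu us of %llu us wall time\n", cpu,
                 (unsigned long long)idle, (unsigned long long)wall);
}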

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;

        /* get the CPU */
        data = per_cpu(cpufreq_cpu_data, cpu);

        if (!data)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&data->kobj))
                goto err_out_put_module;

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        if (cpufreq_disabled())
                return NULL;

        return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
        if (!sysfs)
                kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        if (cpufreq_disabled())
                return;

        __cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, true);
}
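
/*
 * Illustrative sketch (not part of the original file): the canonical
 * get/use/put pattern for the helpers above. cpufreq_cpu_get() pins both
 * the policy kobject and the driver module; every successful get must be
 * balanced by cpufreq_cpu_put(). The "example_" name is hypothetical.
 */
static unsigned int __maybe_unused example_read_cur_freq(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int cur = 0;

        if (policy) {
                cur = policy->cur;
                cpufreq_cpu_put(policy);
        }
        return cur;
}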

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif
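
/*
 * Worked example (added for illustration, made-up numbers): if
 * loops_per_jiffy was 4997120 when the reference frequency of 800000 kHz
 * was recorded, a transition to 1600000 kHz rescales it linearly:
 *
 *     loops_per_jiffy = cpufreq_scale(4997120, 800000, 1600000);
 *     // == 4997120 * 1600000 / 800000 == 9994240
 *
 * cpufreq_scale() is the helper from <linux/cpufreq.h> that computes
 * old * mult / div.
 */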

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                if (WARN(policy->transition_ongoing ==
                                        cpumask_weight(policy->cpus),
                                "In middle of another frequency transition\n"))
                        return;

                policy->transition_ongoing++;

                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                if (WARN(!policy->transition_ongoing,
                                "No frequency transition in progress\n"))
                        return;

                policy->transition_ongoing--;

                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}
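
/*
 * Example (added for illustration): this parser backs the sysfs store path,
 * e.g. from userspace:
 *
 *     # echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * If "ondemand" has not been registered yet, the request_module() call above
 * attempts to load the cpufreq_ondemand module before giving up.
 */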

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}
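
/*
 * For reference (added note, not in the original source),
 * show_one(scaling_cur_freq, cur) below expands to:
 *
 *     static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy,
 *                                          char *buf)
 *     {
 *             return sprintf(buf, "%u\n", policy->cur);
 *     }
 *
 * i.e. one trivial sysfs show routine per exposed policy field.
 */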

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret;        /* signed: may hold a negative error code */   \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;        /* signed: may hold a negative error code */
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /*
         * Do not use cpufreq_set_policy here or the user_policy.max
         * will be wrongly overridden
         */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
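
/*
 * Example output (added for illustration): for a mask containing CPUs 0-3,
 * cpufreq_show_cpus() fills buf with "0 1 2 3\n" and returns 8. The
 * PAGE_SIZE guards above merely truncate the list for very large masks.
 */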

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
                                   struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpufreq_policy *managed_policy;
                struct device *cpu_dev;

                if (j == cpu)
                        continue;

                pr_debug("CPU %u already managed, adding link\n", j);
                managed_policy = cpufreq_cpu_get(cpu);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret) {
                        cpufreq_cpu_put(managed_policy);
                        return ret;
                }
        }
        return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
                                     struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        ret = cpufreq_add_dev_symlink(cpu, policy);
        if (ret)
                goto err_out_kobj_put;

        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
                                  struct device *dev, bool frozen)
{
        struct cpufreq_policy *policy;
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;

        policy = cpufreq_cpu_get(sibling);
        WARN_ON(!policy);

        if (has_target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

        lock_policy_rwsem_write(sibling);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        unlock_policy_rwsem_write(sibling);

        if (has_target) {
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }

        /* Don't touch sysfs links during light-weight init */
        if (frozen) {
                /* Drop the extra refcount that we took above */
                cpufreq_cpu_put(policy);
                return 0;
        }

        ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
        if (ret)
                cpufreq_cpu_put(policy);

        return ret;
}
#endif

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                             bool frozen)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_governor *gov;
        int sibling;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_online_cpu(sibling) {
                struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
                if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        return cpufreq_add_policy_cpu(cpu, sibling, dev,
                                                      frozen);
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

        if (!try_module_get(cpufreq_driver->owner)) {
                ret = -EINVAL;
                goto module_out;
        }

        if (frozen)
                /* Restore the saved policy when doing light-weight init */
                policy = cpufreq_policy_restore(cpu);
        else
                policy = cpufreq_policy_alloc();

        if (!policy)
                goto nomem_out;

        policy->cpu = cpu;
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* Initially set CPU itself as the policy_cpu */
        per_cpu(cpufreq_policy_cpu, cpu) = cpu;

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }
        /* related cpus should at least include policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
#endif

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!frozen) {
                ret = cpufreq_add_dev_interface(cpu, policy, dev);
                if (ret)
                        goto err_out_unregister;
        }

        cpufreq_init_policy(policy);

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        module_put(cpufreq_driver->owner);
        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = NULL;
                if (j != cpu)
                        per_cpu(cpufreq_policy_cpu, j) = -1;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        cpufreq_policy_free(policy);
nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        return __cpufreq_add_dev(dev, sif, false);
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int j;

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *data,
                                           unsigned int old_cpu, bool frozen)
{
        struct device *cpu_dev;
        unsigned long flags;
        int ret;

        /* first sibling now owns the new sysfs dir */
        cpu_dev = get_cpu_device(cpumask_first(data->cpus));

        /* Don't touch sysfs files during light-weight tear-down */
        if (frozen)
                return cpu_dev->id;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&data->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d", __func__, ret);

                WARN_ON(lock_policy_rwsem_write(old_cpu));
                cpumask_set_cpu(old_cpu, data->cpus);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                per_cpu(cpufreq_cpu_data, old_cpu) = data;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                unlock_policy_rwsem_write(old_cpu);

                ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
                                        "cpufreq");

                return -EINVAL;
        }

        return cpu_dev->id;
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev,
                                struct subsys_interface *sif, bool frozen)
{
        unsigned int cpu = dev->id, cpus;
        int new_cpu;
        unsigned long flags;
        struct cpufreq_policy *data;
        struct kobject *kobj;
        struct completion *cmp;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        data = per_cpu(cpufreq_cpu_data, cpu);
        per_cpu(cpufreq_cpu_data, cpu) = NULL;

        /* Save the policy somewhere when doing a light-weight tear-down */
        if (frozen)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = data;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!data) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        data->governor->name, CPUFREQ_NAME_LEN);
#endif

        WARN_ON(lock_policy_rwsem_write(cpu));
        cpus = cpumask_weight(data->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, data->cpus);
        unlock_policy_rwsem_write(cpu);

        if (cpu != data->cpu && !frozen) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {

                new_cpu = cpufreq_nominate_new_policy_cpu(data, cpu, frozen);
                if (new_cpu >= 0) {
                        WARN_ON(lock_policy_rwsem_write(cpu));
                        update_policy_cpu(data, new_cpu);
                        unlock_policy_rwsem_write(cpu);

                        if (!frozen) {
                                pr_debug("%s: policy Kobject moved to cpu: %d "
                                         "from: %d\n", __func__, new_cpu, cpu);
                        }
                }
        }

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (cpufreq_driver->target)
                        __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

                if (!frozen) {
                        lock_policy_rwsem_read(cpu);
                        kobj = &data->kobj;
                        cmp = &data->kobj_unregister;
                        unlock_policy_rwsem_read(cpu);
                        kobject_put(kobj);

                        /*
                         * We need to make sure that the underlying kobj is
                         * actually not referenced anymore by anybody before we
                         * proceed with unloading.
                         */
                        pr_debug("waiting for dropping of refcount\n");
                        wait_for_completion(cmp);
                        pr_debug("wait complete\n");
                }

                /*
                 * Perform the ->exit() even during light-weight tear-down,
                 * since this is a core component, and is essential for the
                 * subsequent light-weight ->init() to succeed.
                 */
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(data);

                if (!frozen)
                        cpufreq_policy_free(data);
        } else {

                if (!frozen) {
                        pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
                        cpufreq_cpu_put(data);
                }

                if (cpufreq_driver->target) {
                        __cpufreq_governor(data, CPUFREQ_GOV_START);
                        __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
                }
        }

        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        return 0;
}

static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int retval;

        if (cpu_is_offline(cpu))
                return 0;

        retval = __cpufreq_remove_dev(dev, sif, false);
        return retval;
}

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 *      in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
        unsigned long flags;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
               "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

        freqs.old = old_freq;
        freqs.new = new_freq;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
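
/*
 * Illustrative sketch (not part of the original file): callers such as
 * thermal code often want the last known frequency without touching the
 * hardware. The "example_" name is hypothetical.
 */
static void __maybe_unused example_log_quick_freq(unsigned int cpu)
{
        unsigned int khz = cpufreq_quick_get(cpu);

        if (khz)
                pr_debug("cpu%u: last known frequency %u kHz\n", cpu, khz);
}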

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /*
                 * verify no discrepancy between actual and saved value exists
                 */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current (static) CPU frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        unsigned int ret_freq = 0;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                goto out;

        if (unlikely(lock_policy_rwsem_read(cpu)))
                goto out_policy;

        ret_freq = __cpufreq_get(cpu);

        unlock_policy_rwsem_read(cpu);

out_policy:
        cpufreq_cpu_put(policy);
out:
        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};

/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *cpu_policy;

        pr_debug("suspending cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return 0;

        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(cpu_policy);
                if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", cpu_policy->cpu);
        }

        cpufreq_cpu_put(cpu_policy);
        return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *      2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
 *          restored. It will verify that the current freq is in sync with
 *          what we believe it to be. This is a bit later than when it
 *          should be, but nonetheless it's better than calling
 *          cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *cpu_policy;

        pr_debug("resuming cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return;

        if (cpufreq_driver->resume) {
                ret = cpufreq_driver->resume(cpu_policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
                                        "step on CPU %u\n", cpu_policy->cpu);
                        goto fail;
                }
        }

        schedule_work(&cpu_policy->update);

fail:
        cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
        .suspend        = cpufreq_bp_suspend,
        .resume         = cpufreq_bp_resume,
};
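
/*
 * Note (added for illustration): syscore ops run late in suspend with only
 * the boot CPU online and interrupts disabled. In the mainline tree these
 * ops are wired up once during core initialisation, roughly:
 *
 *     register_syscore_ops(&cpufreq_syscore_ops);
 *
 * (typically from cpufreq_core_init(), outside the section shown here).
 */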
1552
1553 /**
1554  *      cpufreq_get_current_driver - return current driver's name
1555  *
1556  *      Return the name string of the currently loaded cpufreq driver
1557  *      or NULL, if none.
1558  */
1559 const char *cpufreq_get_current_driver(void)
1560 {
1561         if (cpufreq_driver)
1562                 return cpufreq_driver->name;
1563
1564         return NULL;
1565 }
1566 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
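/*
 * Editorial example (not part of the original file): a trivial use of
 * cpufreq_get_current_driver(), e.g. for diagnostics in another
 * subsystem.  The function name is hypothetical.
 */
#if 0
static void example_log_driver(void)
{
        const char *name = cpufreq_get_current_driver();

        pr_info("active cpufreq driver: %s\n", name ? name : "none");
}
#endif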
1567
1568 /*********************************************************************
1569  *                     NOTIFIER LISTS INTERFACE                      *
1570  *********************************************************************/
1571
1572 /**
1573  *      cpufreq_register_notifier - register a driver with cpufreq
1574  *      @nb: notifier function to register
1575  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1576  *
1577  *      Add a driver to one of two lists: either a list of drivers that
1578  *      are notified about clock rate changes (once before and once after
1579  *      the transition), or a list of drivers that are notified about
1580  *      changes in cpufreq policy.
1581  *
1582  *      This function may sleep, and has the same return conditions as
1583  *      blocking_notifier_chain_register.
1584  */
1585 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1586 {
1587         int ret;
1588
1589         if (cpufreq_disabled())
1590                 return -EINVAL;
1591
1592         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1593
1594         switch (list) {
1595         case CPUFREQ_TRANSITION_NOTIFIER:
1596                 ret = srcu_notifier_chain_register(
1597                                 &cpufreq_transition_notifier_list, nb);
1598                 break;
1599         case CPUFREQ_POLICY_NOTIFIER:
1600                 ret = blocking_notifier_chain_register(
1601                                 &cpufreq_policy_notifier_list, nb);
1602                 break;
1603         default:
1604                 ret = -EINVAL;
1605         }
1606
1607         return ret;
1608 }
1609 EXPORT_SYMBOL(cpufreq_register_notifier);
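/*
 * Editorial example (not part of the original file): a sketch of a
 * transition notifier as registered via cpufreq_register_notifier().
 * The callback fires once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE around each frequency change; the example_* names
 * are hypothetical.  Unregistration mirrors this via
 * cpufreq_unregister_notifier().
 */
#if 0
static int example_transition(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (val == CPUFREQ_POSTCHANGE)
                pr_debug("cpu%u: %u -> %u kHz\n",
                         freqs->cpu, freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_transition,
};

/* in module init:
 *      cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif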
1610
1611 /**
1612  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1613  *      @nb: notifier block to be unregistered
1614  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1615  *
1616  *      Remove a driver from the CPU frequency notifier list.
1617  *
1618  *      This function may sleep, and has the same return conditions as
1619  *      blocking_notifier_chain_unregister.
1620  */
1621 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1622 {
1623         int ret;
1624
1625         if (cpufreq_disabled())
1626                 return -EINVAL;
1627
1628         switch (list) {
1629         case CPUFREQ_TRANSITION_NOTIFIER:
1630                 ret = srcu_notifier_chain_unregister(
1631                                 &cpufreq_transition_notifier_list, nb);
1632                 break;
1633         case CPUFREQ_POLICY_NOTIFIER:
1634                 ret = blocking_notifier_chain_unregister(
1635                                 &cpufreq_policy_notifier_list, nb);
1636                 break;
1637         default:
1638                 ret = -EINVAL;
1639         }
1640
1641         return ret;
1642 }
1643 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1644
1645
1646 /*********************************************************************
1647  *                              GOVERNORS                            *
1648  *********************************************************************/
1649
1650 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1651                             unsigned int target_freq,
1652                             unsigned int relation)
1653 {
1654         int retval = -EINVAL;
1655         unsigned int old_target_freq = target_freq;
1656
1657         if (cpufreq_disabled())
1658                 return -ENODEV;
1659         if (policy->transition_ongoing)
1660                 return -EBUSY;
1661
1662         /* Make sure that target_freq is within supported range */
1663         if (target_freq > policy->max)
1664                 target_freq = policy->max;
1665         if (target_freq < policy->min)
1666                 target_freq = policy->min;
1667
1668         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1669                         policy->cpu, target_freq, relation, old_target_freq);
1670
1671         if (target_freq == policy->cur)
1672                 return 0;
1673
1674         if (cpufreq_driver->target)
1675                 retval = cpufreq_driver->target(policy, target_freq, relation);
1676
1677         return retval;
1678 }
1679 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1680
1681 int cpufreq_driver_target(struct cpufreq_policy *policy,
1682                           unsigned int target_freq,
1683                           unsigned int relation)
1684 {
1685         int ret = -EINVAL;
1686
1687         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1688                 goto fail;
1689
1690         ret = __cpufreq_driver_target(policy, target_freq, relation);
1691
1692         unlock_policy_rwsem_write(policy->cpu);
1693
1694 fail:
1695         return ret;
1696 }
1697 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
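/*
 * Editorial example (not part of the original file): how a governor
 * typically requests a frequency.  Governors call
 * __cpufreq_driver_target() directly (the performance governor does
 * exactly this on GOV_START/GOV_LIMITS); code outside the governor path
 * uses cpufreq_driver_target(), which takes the policy rwsem itself.
 * The function name is hypothetical.
 */
#if 0
static void example_go_maxspeed(struct cpufreq_policy *policy)
{
        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif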
1698
1699 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1700 {
1701         if (cpufreq_disabled())
1702                 return 0;
1703
1704         if (!cpufreq_driver->getavg)
1705                 return 0;
1706
1707         return cpufreq_driver->getavg(policy, cpu);
1708 }
1709 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1710
1711 /*
1712  * Pass an event (e.g. CPUFREQ_GOV_START/STOP/LIMITS) to the policy's governor.
1713  */
1714
1715 static int __cpufreq_governor(struct cpufreq_policy *policy,
1716                                         unsigned int event)
1717 {
1718         int ret;
1719
1720         /* gov is only needed as a fallback when the default governor may
1721            have latency restrictions (e.g. conservative or ondemand);
1722            Kconfig already ensures performance is available in that case.
1723         */
1724 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1725         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1726 #else
1727         struct cpufreq_governor *gov = NULL;
1728 #endif
1729
1730         if (policy->governor->max_transition_latency &&
1731             policy->cpuinfo.transition_latency >
1732             policy->governor->max_transition_latency) {
1733                 if (!gov)
1734                         return -EINVAL;
1735
1736                 printk(KERN_WARNING "%s governor failed: hardware"
1737                        " transition latency too long, falling"
1738                        " back to the %s governor\n",
1739                        policy->governor->name,
1740                        gov->name);
1741
1742                 policy->governor = gov;
1743         }
1744
1745         if (!try_module_get(policy->governor->owner))
1746                 return -EINVAL;
1747
1748         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1749                                                 policy->cpu, event);
1750
1751         mutex_lock(&cpufreq_governor_lock);
1752         if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1753             (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1754                 mutex_unlock(&cpufreq_governor_lock);
1755                 return -EBUSY;
1756         }
1757
1758         if (event == CPUFREQ_GOV_STOP)
1759                 policy->governor_enabled = false;
1760         else if (event == CPUFREQ_GOV_START)
1761                 policy->governor_enabled = true;
1762
1763         mutex_unlock(&cpufreq_governor_lock);
1764
1765         ret = policy->governor->governor(policy, event);
1766
1767         if (!ret) {
1768                 if (event == CPUFREQ_GOV_POLICY_INIT)
1769                         policy->governor->initialized++;
1770                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1771                         policy->governor->initialized--;
1772         } else {
1773                 /* Restore original values */
1774                 mutex_lock(&cpufreq_governor_lock);
1775                 if (event == CPUFREQ_GOV_STOP)
1776                         policy->governor_enabled = true;
1777                 else if (event == CPUFREQ_GOV_START)
1778                         policy->governor_enabled = false;
1779                 mutex_unlock(&cpufreq_governor_lock);
1780         }
1781
1782         /* we keep one module reference alive for
1783                         each CPU governed by this governor */
1784         if ((event != CPUFREQ_GOV_START) || ret)
1785                 module_put(policy->governor->owner);
1786         if ((event == CPUFREQ_GOV_STOP) && !ret)
1787                 module_put(policy->governor->owner);
1788
1789         return ret;
1790 }
1791
1792 int cpufreq_register_governor(struct cpufreq_governor *governor)
1793 {
1794         int err;
1795
1796         if (!governor)
1797                 return -EINVAL;
1798
1799         if (cpufreq_disabled())
1800                 return -ENODEV;
1801
1802         mutex_lock(&cpufreq_governor_mutex);
1803
1804         governor->initialized = 0;
1805         err = -EBUSY;
1806         if (__find_governor(governor->name) == NULL) {
1807                 err = 0;
1808                 list_add(&governor->governor_list, &cpufreq_governor_list);
1809         }
1810
1811         mutex_unlock(&cpufreq_governor_mutex);
1812         return err;
1813 }
1814 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
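/*
 * Editorial example (not part of the original file): the skeleton of a
 * governor driven by the event interface that __cpufreq_governor()
 * implements above; it mirrors the in-tree powersave governor.  All
 * example names are hypothetical.
 */
#if 0
static int cpufreq_governor_example(struct cpufreq_policy *policy,
                                    unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
        case CPUFREQ_GOV_LIMITS:
                /* pin the CPUs of this policy to their lowest frequency */
                __cpufreq_driver_target(policy, policy->min,
                                        CPUFREQ_RELATION_L);
                break;
        default:
                break;
        }

        return 0;
}

static struct cpufreq_governor cpufreq_gov_example = {
        .name           = "example",
        .governor       = cpufreq_governor_example,
        .owner          = THIS_MODULE,
};

/* module init would call cpufreq_register_governor(&cpufreq_gov_example); */
#endif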
1815
1816 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1817 {
1818 #ifdef CONFIG_HOTPLUG_CPU
1819         int cpu;
1820 #endif
1821
1822         if (!governor)
1823                 return;
1824
1825         if (cpufreq_disabled())
1826                 return;
1827
1828 #ifdef CONFIG_HOTPLUG_CPU
1829         for_each_present_cpu(cpu) {
1830                 if (cpu_online(cpu))
1831                         continue;
1832                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1833                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1834         }
1835 #endif
1836
1837         mutex_lock(&cpufreq_governor_mutex);
1838         list_del(&governor->governor_list);
1839         mutex_unlock(&cpufreq_governor_mutex);
1840         return;
1841 }
1842 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1843
1844
1845 /*********************************************************************
1846  *                          POLICY INTERFACE                         *
1847  *********************************************************************/
1848
1849 /**
1850  * cpufreq_get_policy - get the current cpufreq_policy
1851  * @policy: struct cpufreq_policy into which the current policy is written
1852  * @cpu: CPU whose policy shall be read
1853  *
1854  * Reads the current cpufreq policy of @cpu into @policy.
1855  */
1856 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1857 {
1858         struct cpufreq_policy *cpu_policy;
1859         if (!policy)
1860                 return -EINVAL;
1861
1862         cpu_policy = cpufreq_cpu_get(cpu);
1863         if (!cpu_policy)
1864                 return -EINVAL;
1865
1866         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1867
1868         cpufreq_cpu_put(cpu_policy);
1869         return 0;
1870 }
1871 EXPORT_SYMBOL(cpufreq_get_policy);
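/*
 * Editorial example (not part of the original file): taking a consistent
 * snapshot of a CPU's policy, e.g. from a thermal driver.  The function
 * name is hypothetical.
 */
#if 0
static void example_dump_policy(unsigned int cpu)
{
        struct cpufreq_policy policy;

        if (cpufreq_get_policy(&policy, cpu))
                return;

        pr_info("cpu%u: %u-%u kHz (cur %u kHz)\n",
                cpu, policy.min, policy.max, policy.cur);
}
#endif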
1872
1873 /*
1874  * data   : current policy.
1875  * policy : policy to be set.
1876  */
1877 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1878                                 struct cpufreq_policy *policy)
1879 {
1880         int ret = 0, failed = 1;
1881
1882         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1883                 policy->min, policy->max);
1884
1885         memcpy(&policy->cpuinfo, &data->cpuinfo,
1886                                 sizeof(struct cpufreq_cpuinfo));
1887
1888         if (policy->min > data->max || policy->max < data->min) {
1889                 ret = -EINVAL;
1890                 goto error_out;
1891         }
1892
1893         /* verify the cpu speed can be set within this limit */
1894         ret = cpufreq_driver->verify(policy);
1895         if (ret)
1896                 goto error_out;
1897
1898         /* adjust if necessary - all reasons */
1899         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1900                         CPUFREQ_ADJUST, policy);
1901
1902         /* adjust if necessary - hardware incompatibility */
1903         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1904                         CPUFREQ_INCOMPATIBLE, policy);
1905
1906         /*
1907          * verify the cpu speed can be set within this limit, which might be
1908          * different from the first one
1909          */
1910         ret = cpufreq_driver->verify(policy);
1911         if (ret)
1912                 goto error_out;
1913
1914         /* notification of the new policy */
1915         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1916                         CPUFREQ_NOTIFY, policy);
1917
1918         data->min = policy->min;
1919         data->max = policy->max;
1920
1921         pr_debug("new min and max freqs are %u - %u kHz\n",
1922                                         data->min, data->max);
1923
1924         if (cpufreq_driver->setpolicy) {
1925                 data->policy = policy->policy;
1926                 pr_debug("setting range\n");
1927                 ret = cpufreq_driver->setpolicy(policy);
1928         } else {
1929                 if (policy->governor != data->governor) {
1930                         /* save old, working values */
1931                         struct cpufreq_governor *old_gov = data->governor;
1932
1933                         pr_debug("governor switch\n");
1934
1935                         /* end old governor */
1936                         if (data->governor) {
1937                                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1938                                 unlock_policy_rwsem_write(policy->cpu);
1939                                 __cpufreq_governor(data,
1940                                                 CPUFREQ_GOV_POLICY_EXIT);
1941                                 lock_policy_rwsem_write(policy->cpu);
1942                         }
1943
1944                         /* start new governor */
1945                         data->governor = policy->governor;
1946                         if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1947                                 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1948                                         failed = 0;
1949                                 } else {
1950                                         unlock_policy_rwsem_write(policy->cpu);
1951                                         __cpufreq_governor(data,
1952                                                         CPUFREQ_GOV_POLICY_EXIT);
1953                                         lock_policy_rwsem_write(policy->cpu);
1954                                 }
1955                         }
1956
1957                         if (failed) {
1958                                 /* new governor failed, so re-start old one */
1959                                 pr_debug("starting governor %s failed\n",
1960                                                         data->governor->name);
1961                                 if (old_gov) {
1962                                         data->governor = old_gov;
1963                                         __cpufreq_governor(data,
1964                                                         CPUFREQ_GOV_POLICY_INIT);
1965                                         __cpufreq_governor(data,
1966                                                            CPUFREQ_GOV_START);
1967                                 }
1968                                 ret = -EINVAL;
1969                                 goto error_out;
1970                         }
1971                         /* might be a policy change, too, so fall through */
1972                 }
1973                 pr_debug("governor: change or update limits\n");
1974                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1975         }
1976
1977 error_out:
1978         return ret;
1979 }
1980
1981 /**
1982  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1983  *      @cpu: CPU which shall be re-evaluated
1984  *
1985  *      Useful for policy notifiers which have different requirements
1986  *      at different times.
1987  */
1988 int cpufreq_update_policy(unsigned int cpu)
1989 {
1990         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1991         struct cpufreq_policy policy;
1992         int ret;
1993
1994         if (!data) {
1995                 ret = -ENODEV;
1996                 goto no_policy;
1997         }
1998
1999         if (unlikely(lock_policy_rwsem_write(cpu))) {
2000                 ret = -EINVAL;
2001                 goto fail;
2002         }
2003
2004         pr_debug("updating policy for CPU %u\n", cpu);
2005         memcpy(&policy, data, sizeof(struct cpufreq_policy));
2006         policy.min = data->user_policy.min;
2007         policy.max = data->user_policy.max;
2008         policy.policy = data->user_policy.policy;
2009         policy.governor = data->user_policy.governor;
2010
2011         /*
2012          * BIOS might change freq behind our back
2013          * -> ask driver for current freq and notify governors about a change
2014          */
2015         if (cpufreq_driver->get) {
2016                 policy.cur = cpufreq_driver->get(cpu);
2017                 if (!data->cur) {
2018                         pr_debug("Driver did not initialize current freq\n");
2019                         data->cur = policy.cur;
2020                 } else {
2021                         if (data->cur != policy.cur && cpufreq_driver->target)
2022                                 cpufreq_out_of_sync(cpu, data->cur,
2023                                                                 policy.cur);
2024                 }
2025         }
2026
2027         ret = __cpufreq_set_policy(data, &policy);
2028
2029         unlock_policy_rwsem_write(cpu);
2030
2031 fail:
2032         cpufreq_cpu_put(data);
2033 no_policy:
2034         return ret;
2035 }
2036 EXPORT_SYMBOL(cpufreq_update_policy);
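/*
 * Editorial example (not part of the original file): platform code that
 * learns (e.g. via an ACPI notification) that firmware changed the
 * permitted frequency range can simply re-evaluate each affected CPU,
 * as the ACPI processor driver does.  The function name is hypothetical.
 */
#if 0
static void example_firmware_limits_changed(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu)
                cpufreq_update_policy(cpu);
}
#endif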
2037
2038 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2039                                         unsigned long action, void *hcpu)
2040 {
2041         unsigned int cpu = (unsigned long)hcpu;
2042         struct device *dev;
2043
2044         dev = get_cpu_device(cpu);
2045         if (dev) {
2046                 switch (action) {
2047                 case CPU_ONLINE:
2048                 case CPU_ONLINE_FROZEN:
2049                         cpufreq_add_dev(dev, NULL);
2050                         cpufreq_update_policy(cpu);
2051                         break;
2052                 case CPU_DOWN_PREPARE:
2053                 case CPU_DOWN_PREPARE_FROZEN:
2054                         __cpufreq_remove_dev(dev, NULL, false);
2055                         break;
2056                 case CPU_DOWN_FAILED:
2057                 case CPU_DOWN_FAILED_FROZEN:
2058                         cpufreq_add_dev(dev, NULL);
2059                         break;
2060                 }
2061         }
2062         return NOTIFY_OK;
2063 }
2064
2065 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2066         .notifier_call = cpufreq_cpu_callback,
2067 };
2068
2069 /*********************************************************************
2070  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2071  *********************************************************************/
2072
2073 /**
2074  * cpufreq_register_driver - register a CPU Frequency driver
2075  * @driver_data: A struct cpufreq_driver containing the values
2076  * submitted by the CPU Frequency driver.
2077  *
2078  * Registers a CPU Frequency driver to this core code. This code
2079  * returns zero on success, -EBUSY when another driver got here first
2080  * (and isn't unregistered in the meantime).
2081  *
2082  */
2083 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2084 {
2085         unsigned long flags;
2086         int ret;
2087
2088         if (cpufreq_disabled())
2089                 return -ENODEV;
2090
2091         if (!driver_data || !driver_data->verify || !driver_data->init ||
2092             ((!driver_data->setpolicy) && (!driver_data->target)))
2093                 return -EINVAL;
2094
2095         pr_debug("trying to register driver %s\n", driver_data->name);
2096
2097         if (driver_data->setpolicy)
2098                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2099
2100         write_lock_irqsave(&cpufreq_driver_lock, flags);
2101         if (cpufreq_driver) {
2102                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2103                 return -EBUSY;
2104         }
2105         cpufreq_driver = driver_data;
2106         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2107
2108         ret = subsys_interface_register(&cpufreq_interface);
2109         if (ret)
2110                 goto err_null_driver;
2111
2112         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2113                 int i;
2114                 ret = -ENODEV;
2115
2116                 /* check for at least one working CPU */
2117                 for (i = 0; i < nr_cpu_ids; i++)
2118                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2119                                 ret = 0;
2120                                 break;
2121                         }
2122
2123                 /* if all ->init() calls failed, unregister */
2124                 if (ret) {
2125                         pr_debug("no CPU initialized for driver %s\n",
2126                                                         driver_data->name);
2127                         goto err_if_unreg;
2128                 }
2129         }
2130
2131         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2132         pr_debug("driver %s up and running\n", driver_data->name);
2133
2134         return 0;
2135 err_if_unreg:
2136         subsys_interface_unregister(&cpufreq_interface);
2137 err_null_driver:
2138         write_lock_irqsave(&cpufreq_driver_lock, flags);
2139         cpufreq_driver = NULL;
2140         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2141         return ret;
2142 }
2143 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
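/*
 * Editorial example (not part of the original file): the minimum a
 * ->target style driver must provide to pass the checks in
 * cpufreq_register_driver() above (->init, ->verify and one of
 * ->target/->setpolicy).  A real ->target() would program the hardware
 * and emit CPUFREQ_PRECHANGE/POSTCHANGE notifications; all foo_* names
 * and frequencies are hypothetical.
 */
#if 0
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        policy->cpuinfo.min_freq = 200000;              /* kHz */
        policy->cpuinfo.max_freq = 1000000;             /* kHz */
        policy->cpuinfo.transition_latency = 100000;    /* ns */
        policy->min = policy->cpuinfo.min_freq;
        policy->max = policy->cpuinfo.max_freq;
        policy->cur = policy->cpuinfo.max_freq;
        return 0;
}

static int foo_cpufreq_verify(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
        return 0;
}

static int foo_cpufreq_target(struct cpufreq_policy *policy,
                              unsigned int target_freq, unsigned int relation)
{
        /* program hardware, notify PRE/POSTCHANGE, update policy->cur */
        return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
        .name   = "foo",
        .init   = foo_cpufreq_init,
        .verify = foo_cpufreq_verify,
        .target = foo_cpufreq_target,
};

/* module init: return cpufreq_register_driver(&foo_cpufreq_driver); */
#endif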
2144
2145 /**
2146  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2147  *
2148  * Unregister the current CPUFreq driver. Only call this if you have
2149  * the right to do so, i.e. if you have succeeded in initialising before!
2150  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2151  * currently not initialised.
2152  */
2153 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2154 {
2155         unsigned long flags;
2156
2157         if (!cpufreq_driver || (driver != cpufreq_driver))
2158                 return -EINVAL;
2159
2160         pr_debug("unregistering driver %s\n", driver->name);
2161
2162         subsys_interface_unregister(&cpufreq_interface);
2163         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2164
2165         write_lock_irqsave(&cpufreq_driver_lock, flags);
2166         cpufreq_driver = NULL;
2167         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2168
2169         return 0;
2170 }
2171 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2172
2173 static int __init cpufreq_core_init(void)
2174 {
2175         int cpu;
2176
2177         if (cpufreq_disabled())
2178                 return -ENODEV;
2179
2180         for_each_possible_cpu(cpu) {
2181                 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2182                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2183         }
2184
2185         cpufreq_global_kobject = kobject_create();
2186         BUG_ON(!cpufreq_global_kobject);
2187         register_syscore_ops(&cpufreq_syscore_ops);
2188
2189         return 0;
2190 }
2191 core_initcall(cpufreq_core_init);