/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
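
/*
 * Illustrative sketch (not part of the original file): the two macros
 * above stamp out lock_policy_rwsem_read()/lock_policy_rwsem_write() and
 * their unlock counterparts. A typical reader of the policy, such as
 * cpufreq_get() further down in this file, pairs them like this:
 *
 *	if (unlikely(lock_policy_rwsem_read(cpu)))
 *		goto out;
 *	ret_freq = __cpufreq_get(cpu);
 *	unlock_policy_rwsem_read(cpu);
 */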

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
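
/*
 * Illustrative sketch (assumption, not from this file): a client of the
 * transition list supplies a notifier callback that receives the
 * struct cpufreq_freqs describing the old and new frequency:
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 */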

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	if (!cpufreq_driver)
		goto err_out_unlock;
	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);
	if (!data)
		goto err_out_put_module;
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
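
/*
 * Worked example (illustrative): cpufreq_scale(ref, ref_freq, new_freq)
 * computes ref * new_freq / ref_freq. With a reference loops_per_jiffy
 * of 4000000 saved at 1000000 kHz, a transition to 2000000 kHz rescales
 * loops_per_jiffy to 4000000 * 2000000 / 1000000 = 8000000, keeping
 * udelay() roughly calibrated after the clock change.
 */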

void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
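
/*
 * Illustrative sketch (assumption, not from this file): a scaling driver
 * brackets the actual hardware reprogramming with the two notifications,
 * which is what lets the core and its notifier clients see both phases
 * of a transition:
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target;
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	... program the PLL / voltage regulator ...
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */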

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);
		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}
		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
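
/*
 * Illustrative sketch (not part of the original file): show_one() and
 * store_one() stamp out one sysfs accessor per attribute. For example,
 * show_one(scaling_max_freq, max) expands to roughly:
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * which backs reads of
 * /sys/devices/system/cpu/cpuN/cpufreq/scaling_max_freq.
 */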

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/*
	 * Do not use cpufreq_set_policy here or the user_policy.max
	 * will be wrongly overridden
	 */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
857 unsigned int j, cpu = dev->id;
859 struct cpufreq_policy *policy;
861 #ifdef CONFIG_HOTPLUG_CPU
862 struct cpufreq_governor *gov;
866 if (cpu_is_offline(cpu))
869 pr_debug("adding CPU %u\n", cpu);
872 /* check whether a different CPU already registered this
873 * CPU because it is in the same boat. */
874 policy = cpufreq_cpu_get(cpu);
875 if (unlikely(policy)) {
876 cpufreq_cpu_put(policy);
880 #ifdef CONFIG_HOTPLUG_CPU
881 /* Check if this cpu was hot-unplugged earlier and has siblings */
882 read_lock_irqsave(&cpufreq_driver_lock, flags);
883 for_each_online_cpu(sibling) {
884 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
885 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
886 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
887 return cpufreq_add_policy_cpu(cpu, sibling, dev);
890 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
894 if (!try_module_get(cpufreq_driver->owner)) {
899 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
903 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
904 goto err_free_policy;
906 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
907 goto err_free_cpumask;
910 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
911 cpumask_copy(policy->cpus, cpumask_of(cpu));
913 /* Initially set CPU itself as the policy_cpu */
914 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
916 init_completion(&policy->kobj_unregister);
917 INIT_WORK(&policy->update, handle_update);
919 /* call driver. From then on the cpufreq must be able
920 * to accept all calls to ->verify and ->setpolicy for this CPU
922 ret = cpufreq_driver->init(policy);
924 pr_debug("initialization failed\n");
925 goto err_set_policy_cpu;
928 /* related cpus should atleast have policy->cpus */
929 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
932 * affected cpus must always be the one, which are online. We aren't
933 * managing offline cpus here.
935 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
937 policy->user_policy.min = policy->min;
938 policy->user_policy.max = policy->max;
940 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
941 CPUFREQ_START, policy);
943 #ifdef CONFIG_HOTPLUG_CPU
944 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
946 policy->governor = gov;
947 pr_debug("Restoring governor %s for cpu %d\n",
948 policy->governor->name, cpu);
952 ret = cpufreq_add_dev_interface(cpu, policy, dev);
954 goto err_out_unregister;
956 kobject_uevent(&policy->kobj, KOBJ_ADD);
957 module_put(cpufreq_driver->owner);
958 pr_debug("initialization complete\n");
963 write_lock_irqsave(&cpufreq_driver_lock, flags);
964 for_each_cpu(j, policy->cpus)
965 per_cpu(cpufreq_cpu_data, j) = NULL;
966 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
968 kobject_put(&policy->kobj);
969 wait_for_completion(&policy->kobj_unregister);
972 per_cpu(cpufreq_policy_cpu, cpu) = -1;
973 free_cpumask_var(policy->related_cpus);
975 free_cpumask_var(policy->cpus);
979 module_put(cpufreq_driver->owner);

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target)
			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else {
		pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
		cpufreq_cpu_put(data);
		if (cpufreq_driver->target) {
			__cpufreq_governor(data, CPUFREQ_GOV_START);
			__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
		}
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}

static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	retval = __cpufreq_remove_dev(dev, sif);
	return retval;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
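
/*
 * Illustrative sketch (assumption, not from this file): a client that
 * only needs the cached value, e.g. for reporting, can poll it without
 * touching the hardware:
 *
 *	unsigned int khz = cpufreq_quick_get(0);
 *
 *	if (khz)
 *		pr_info("cpu0 last known freq: %u kHz\n", khz);
 */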

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
		   saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
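
/*
 * Illustrative sketch (assumption, not from this file): pairing the
 * registration with the callback sketched near the top of this file,
 * a module would do:
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	...
 *	cpufreq_unregister_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */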

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
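
/*
 * Illustrative sketch (assumption, not from this file): a governor that
 * already holds the policy rwsem requests a frequency through the
 * unlocked variant, and the core clamps the request to the policy limits:
 *
 *	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *
 * CPUFREQ_RELATION_H selects the highest supported frequency at or below
 * the target; CPUFREQ_RELATION_L selects the lowest at or above it.
 */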

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/*
	 * Must only be defined when the default governor is known to have
	 * latency restrictions, like e.g. conservative or ondemand; that
	 * this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
	   each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
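
/*
 * Illustrative sketch (assumption, not from this file): a governor module
 * fills in a struct cpufreq_governor and registers it at module init. The
 * ->governor callback receives the GOV_START/GOV_STOP/GOV_LIMITS events
 * dispatched by __cpufreq_governor() above:
 *
 *	static struct cpufreq_governor cpufreq_gov_mygov = {
 *		.name		= "mygov",
 *		.governor	= mygov_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init mygov_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_mygov);
 *	}
 */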

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
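
/*
 * Illustrative sketch (assumption, not from this file): a minimal scaling
 * driver supplies ->init, ->verify and either ->target or ->setpolicy -
 * the combination the validation above insists on - and registers itself
 * at module load:
 *
 *	static struct cpufreq_driver mydrv_driver = {
 *		.name	= "mydrv",
 *		.owner	= THIS_MODULE,
 *		.init	= mydrv_cpu_init,
 *		.verify	= mydrv_verify,
 *		.target	= mydrv_target,
 *	};
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return cpufreq_register_driver(&mydrv_driver);
 *	}
 */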

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);