2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/notifier.h>
24 #include <linux/cpufreq.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/device.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/completion.h>
32 #include <linux/mutex.h>
33 #include <linux/syscore_ops.h>
35 #include <trace/events/power.h>
38 * The "cpufreq driver" - the arch- or hardware-dependent low
39 * level driver of CPUFreq support, and its rwlock. This lock
40 * also protects the cpufreq_cpu_data array.
42 static struct cpufreq_driver *cpufreq_driver;
43 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
44 #ifdef CONFIG_HOTPLUG_CPU
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 static DEFINE_RWLOCK(cpufreq_driver_lock);
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
62 * - Governor routines that can be called in the cpufreq hotplug path should not
63 * take this semaphore, as the top-level hotplug notifier handler already holds it.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
70 #define lock_policy_rwsem(mode, cpu) \
71 static int lock_policy_rwsem_##mode(int cpu) \
73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 lock_policy_rwsem(read, cpu);
81 lock_policy_rwsem(write, cpu);
83 #define unlock_policy_rwsem(mode, cpu) \
84 static void unlock_policy_rwsem_##mode(int cpu) \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
91 unlock_policy_rwsem(read, cpu);
92 unlock_policy_rwsem(write, cpu);
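/*
 * Illustrative usage sketch (not part of the original source): the macros
 * above generate lock_policy_rwsem_read()/lock_policy_rwsem_write() and the
 * matching unlock helpers, which callers such as show(), store() and
 * cpufreq_get() pair around accesses to the policy, roughly:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read policy fields ...
 *	unlock_policy_rwsem_read(cpu);
 */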
94 /* internal prototypes */
95 static int __cpufreq_governor(struct cpufreq_policy *policy,
97 static unsigned int __cpufreq_get(unsigned int cpu);
98 static void handle_update(struct work_struct *work);
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
107 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
108 static struct srcu_notifier_head cpufreq_transition_notifier_list;
110 static bool init_cpufreq_transition_notifier_list_called;
111 static int __init init_cpufreq_transition_notifier_list(void)
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
114 init_cpufreq_transition_notifier_list_called = true;
117 pure_initcall(init_cpufreq_transition_notifier_list);
119 static int off __read_mostly;
120 static int cpufreq_disabled(void)
124 void disable_cpufreq(void)
128 static LIST_HEAD(cpufreq_governor_list);
129 static DEFINE_MUTEX(cpufreq_governor_mutex);
131 bool have_governor_per_policy(void)
133 return cpufreq_driver->have_governor_per_policy;
135 EXPORT_SYMBOL_GPL(have_governor_per_policy);
137 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
139 struct cpufreq_policy *data;
142 if (cpu >= nr_cpu_ids)
145 /* get the cpufreq driver */
146 read_lock_irqsave(&cpufreq_driver_lock, flags);
151 if (!try_module_get(cpufreq_driver->owner))
156 data = per_cpu(cpufreq_cpu_data, cpu);
159 goto err_out_put_module;
161 if (!sysfs && !kobject_get(&data->kobj))
162 goto err_out_put_module;
164 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
168 module_put(cpufreq_driver->owner);
170 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
175 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
177 if (cpufreq_disabled())
180 return __cpufreq_cpu_get(cpu, false);
182 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
184 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
186 return __cpufreq_cpu_get(cpu, true);
189 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
192 kobject_put(&data->kobj);
193 module_put(cpufreq_driver->owner);
196 void cpufreq_cpu_put(struct cpufreq_policy *data)
198 if (cpufreq_disabled())
201 __cpufreq_cpu_put(data, false);
203 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
205 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
207 __cpufreq_cpu_put(data, true);
210 /*********************************************************************
211 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
212 *********************************************************************/
215 * adjust_jiffies - adjust the system "loops_per_jiffy"
217 * This function alters the system "loops_per_jiffy" for the clock
218 * speed change. Note that loops_per_jiffy cannot be updated on SMP
219 * systems as each CPU might be scaled differently. So, use the arch
220 * per-CPU loops_per_jiffy value wherever possible.
223 static unsigned long l_p_j_ref;
224 static unsigned int l_p_j_ref_freq;
226 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
228 if (ci->flags & CPUFREQ_CONST_LOOPS)
231 if (!l_p_j_ref_freq) {
232 l_p_j_ref = loops_per_jiffy;
233 l_p_j_ref_freq = ci->old;
234 pr_debug("saving %lu as reference value for loops_per_jiffy; "
235 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
237 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
238 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
239 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
241 pr_debug("scaling loops_per_jiffy to %lu "
242 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
246 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
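/*
 * Sketch of the rescaling done in adjust_jiffies() above (assumes the usual
 * definition of cpufreq_scale() from cpufreq.h, i.e. old * mult / div):
 *
 *	loops_per_jiffy ~= l_p_j_ref * ci->new / l_p_j_ref_freq;
 *
 * so a CPU clocked twice as fast as the reference frequency gets twice the
 * calibrated delay-loop count.
 */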
253 void __cpufreq_notify_transition(struct cpufreq_policy *policy,
254 struct cpufreq_freqs *freqs, unsigned int state)
256 BUG_ON(irqs_disabled());
258 if (cpufreq_disabled())
261 freqs->flags = cpufreq_driver->flags;
262 pr_debug("notification %u of frequency transition to %u kHz\n",
267 case CPUFREQ_PRECHANGE:
268 /* detect if the driver reported a value as "old frequency"
269 * which is not equal to what the cpufreq core thinks is
272 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
273 if ((policy) && (policy->cpu == freqs->cpu) &&
274 (policy->cur) && (policy->cur != freqs->old)) {
275 pr_debug("Warning: CPU frequency is"
276 " %u kHz, cpufreq assumed %u kHz.\n",
277 freqs->old, policy->cur);
278 freqs->old = policy->cur;
281 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
282 CPUFREQ_PRECHANGE, freqs);
283 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
286 case CPUFREQ_POSTCHANGE:
287 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
288 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
289 (unsigned long)freqs->cpu);
290 trace_cpu_frequency(freqs->new, freqs->cpu);
291 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
292 CPUFREQ_POSTCHANGE, freqs);
293 if (likely(policy) && likely(policy->cpu == freqs->cpu))
294 policy->cur = freqs->new;
299 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
300 * on frequency transition.
302 * This function calls the transition notifiers and the "adjust_jiffies"
303 function. It is called twice on all CPU frequency changes that have external effects.
306 void cpufreq_notify_transition(struct cpufreq_policy *policy,
307 struct cpufreq_freqs *freqs, unsigned int state)
309 for_each_cpu(freqs->cpu, policy->cpus)
310 __cpufreq_notify_transition(policy, freqs, state);
312 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
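/*
 * Illustrative driver-side sketch (hypothetical ->target() code, not taken
 * from this file): scaling drivers are expected to bracket the actual
 * hardware reprogramming with the two notifications. new_freq is a made-up
 * local here:
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_freq };
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	... program the hardware to new_freq ...
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */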
316 /*********************************************************************
318 *********************************************************************/
320 static struct cpufreq_governor *__find_governor(const char *str_governor)
322 struct cpufreq_governor *t;
324 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
325 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
332 * cpufreq_parse_governor - parse a governor string
334 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
335 struct cpufreq_governor **governor)
342 if (cpufreq_driver->setpolicy) {
343 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
344 *policy = CPUFREQ_POLICY_PERFORMANCE;
346 } else if (!strnicmp(str_governor, "powersave",
348 *policy = CPUFREQ_POLICY_POWERSAVE;
351 } else if (cpufreq_driver->target) {
352 struct cpufreq_governor *t;
354 mutex_lock(&cpufreq_governor_mutex);
356 t = __find_governor(str_governor);
361 mutex_unlock(&cpufreq_governor_mutex);
362 ret = request_module("cpufreq_%s", str_governor);
363 mutex_lock(&cpufreq_governor_mutex);
366 t = __find_governor(str_governor);
374 mutex_unlock(&cpufreq_governor_mutex);
382 * cpufreq_per_cpu_attr_read() / show_##file_name() -
383 * print out cpufreq information
385 * Write out information from cpufreq_driver->policy[cpu]; object must be "unsigned int".
389 #define show_one(file_name, object) \
390 static ssize_t show_##file_name \
391 (struct cpufreq_policy *policy, char *buf) \
393 return sprintf(buf, "%u\n", policy->object); \
396 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
397 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
398 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
399 show_one(scaling_min_freq, min);
400 show_one(scaling_max_freq, max);
401 show_one(scaling_cur_freq, cur);
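/*
 * For example, show_one(scaling_min_freq, min) above expands to a
 * show_scaling_min_freq() helper that simply prints policy->min; the
 * default_attrs[] table further down exposes it as the per-policy
 * scaling_min_freq file in sysfs.
 */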
403 static int __cpufreq_set_policy(struct cpufreq_policy *data,
404 struct cpufreq_policy *policy);
407 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
409 #define store_one(file_name, object) \
410 static ssize_t store_##file_name \
411 (struct cpufreq_policy *policy, const char *buf, size_t count) \
414 struct cpufreq_policy new_policy; \
416 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
420 ret = sscanf(buf, "%u", &new_policy.object); \
424 ret = __cpufreq_set_policy(policy, &new_policy); \
425 policy->user_policy.object = policy->object; \
427 return ret ? ret : count; \
430 store_one(scaling_min_freq, min);
431 store_one(scaling_max_freq, max);
434 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
436 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
439 unsigned int cur_freq = __cpufreq_get(policy->cpu);
441 return sprintf(buf, "<unknown>");
442 return sprintf(buf, "%u\n", cur_freq);
447 * show_scaling_governor - show the current policy for the specified CPU
449 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
451 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
452 return sprintf(buf, "powersave\n");
453 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
454 return sprintf(buf, "performance\n");
455 else if (policy->governor)
456 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
457 policy->governor->name);
463 * store_scaling_governor - store policy for the specified CPU
465 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
466 const char *buf, size_t count)
469 char str_governor[16];
470 struct cpufreq_policy new_policy;
472 ret = cpufreq_get_policy(&new_policy, policy->cpu);
476 ret = sscanf(buf, "%15s", str_governor);
480 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
481 &new_policy.governor))
484 /* Do not use cpufreq_set_policy here or the user_policy.max
485 will be wrongly overridden */
486 ret = __cpufreq_set_policy(policy, &new_policy);
488 policy->user_policy.policy = policy->policy;
489 policy->user_policy.governor = policy->governor;
498 * show_scaling_driver - show the cpufreq driver currently loaded
500 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
502 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
506 * show_scaling_available_governors - show the available CPUfreq governors
508 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
512 struct cpufreq_governor *t;
514 if (!cpufreq_driver->target) {
515 i += sprintf(buf, "performance powersave");
519 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
520 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
521 - (CPUFREQ_NAME_LEN + 2)))
523 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
526 i += sprintf(&buf[i], "\n");
530 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
535 for_each_cpu(cpu, mask) {
537 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
538 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
539 if (i >= (PAGE_SIZE - 5))
542 i += sprintf(&buf[i], "\n");
547 * show_related_cpus - show the CPUs affected by each transition even if
548 * hw coordination is in use
550 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
552 return show_cpus(policy->related_cpus, buf);
556 * show_affected_cpus - show the CPUs affected by each transition
558 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
560 return show_cpus(policy->cpus, buf);
563 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
564 const char *buf, size_t count)
566 unsigned int freq = 0;
569 if (!policy->governor || !policy->governor->store_setspeed)
572 ret = sscanf(buf, "%u", &freq);
576 policy->governor->store_setspeed(policy, freq);
581 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
583 if (!policy->governor || !policy->governor->show_setspeed)
584 return sprintf(buf, "<unsupported>\n");
586 return policy->governor->show_setspeed(policy, buf);
590 * show_bios_limit - show the current cpufreq HW/BIOS limitation
592 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
596 if (cpufreq_driver->bios_limit) {
597 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
599 return sprintf(buf, "%u\n", limit);
601 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
604 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
605 cpufreq_freq_attr_ro(cpuinfo_min_freq);
606 cpufreq_freq_attr_ro(cpuinfo_max_freq);
607 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
608 cpufreq_freq_attr_ro(scaling_available_governors);
609 cpufreq_freq_attr_ro(scaling_driver);
610 cpufreq_freq_attr_ro(scaling_cur_freq);
611 cpufreq_freq_attr_ro(bios_limit);
612 cpufreq_freq_attr_ro(related_cpus);
613 cpufreq_freq_attr_ro(affected_cpus);
614 cpufreq_freq_attr_rw(scaling_min_freq);
615 cpufreq_freq_attr_rw(scaling_max_freq);
616 cpufreq_freq_attr_rw(scaling_governor);
617 cpufreq_freq_attr_rw(scaling_setspeed);
619 static struct attribute *default_attrs[] = {
620 &cpuinfo_min_freq.attr,
621 &cpuinfo_max_freq.attr,
622 &cpuinfo_transition_latency.attr,
623 &scaling_min_freq.attr,
624 &scaling_max_freq.attr,
627 &scaling_governor.attr,
628 &scaling_driver.attr,
629 &scaling_available_governors.attr,
630 &scaling_setspeed.attr,
634 struct kobject *cpufreq_global_kobject;
635 EXPORT_SYMBOL(cpufreq_global_kobject);
637 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
638 #define to_attr(a) container_of(a, struct freq_attr, attr)
640 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
642 struct cpufreq_policy *policy = to_policy(kobj);
643 struct freq_attr *fattr = to_attr(attr);
644 ssize_t ret = -EINVAL;
645 policy = cpufreq_cpu_get_sysfs(policy->cpu);
649 if (lock_policy_rwsem_read(policy->cpu) < 0)
653 ret = fattr->show(policy, buf);
657 unlock_policy_rwsem_read(policy->cpu);
659 cpufreq_cpu_put_sysfs(policy);
664 static ssize_t store(struct kobject *kobj, struct attribute *attr,
665 const char *buf, size_t count)
667 struct cpufreq_policy *policy = to_policy(kobj);
668 struct freq_attr *fattr = to_attr(attr);
669 ssize_t ret = -EINVAL;
670 policy = cpufreq_cpu_get_sysfs(policy->cpu);
674 if (lock_policy_rwsem_write(policy->cpu) < 0)
678 ret = fattr->store(policy, buf, count);
682 unlock_policy_rwsem_write(policy->cpu);
684 cpufreq_cpu_put_sysfs(policy);
689 static void cpufreq_sysfs_release(struct kobject *kobj)
691 struct cpufreq_policy *policy = to_policy(kobj);
692 pr_debug("last reference is dropped\n");
693 complete(&policy->kobj_unregister);
696 static const struct sysfs_ops sysfs_ops = {
701 static struct kobj_type ktype_cpufreq = {
702 .sysfs_ops = &sysfs_ops,
703 .default_attrs = default_attrs,
704 .release = cpufreq_sysfs_release,
707 /* symlink affected CPUs */
708 static int cpufreq_add_dev_symlink(unsigned int cpu,
709 struct cpufreq_policy *policy)
714 for_each_cpu(j, policy->cpus) {
715 struct cpufreq_policy *managed_policy;
716 struct device *cpu_dev;
721 pr_debug("CPU %u already managed, adding link\n", j);
722 managed_policy = cpufreq_cpu_get(cpu);
723 cpu_dev = get_cpu_device(j);
724 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
727 cpufreq_cpu_put(managed_policy);
734 static int cpufreq_add_dev_interface(unsigned int cpu,
735 struct cpufreq_policy *policy,
738 struct cpufreq_policy new_policy;
739 struct freq_attr **drv_attr;
744 /* prepare interface data */
745 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
746 &dev->kobj, "cpufreq");
750 /* set up files for this cpu device */
751 drv_attr = cpufreq_driver->attr;
752 while ((drv_attr) && (*drv_attr)) {
753 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
755 goto err_out_kobj_put;
758 if (cpufreq_driver->get) {
759 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
761 goto err_out_kobj_put;
763 if (cpufreq_driver->target) {
764 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
766 goto err_out_kobj_put;
768 if (cpufreq_driver->bios_limit) {
769 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
771 goto err_out_kobj_put;
774 write_lock_irqsave(&cpufreq_driver_lock, flags);
775 for_each_cpu(j, policy->cpus) {
776 per_cpu(cpufreq_cpu_data, j) = policy;
777 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
779 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
781 ret = cpufreq_add_dev_symlink(cpu, policy);
783 goto err_out_kobj_put;
785 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
786 /* assure that the starting sequence is run in __cpufreq_set_policy */
787 policy->governor = NULL;
789 /* set default policy */
790 ret = __cpufreq_set_policy(policy, &new_policy);
791 policy->user_policy.policy = policy->policy;
792 policy->user_policy.governor = policy->governor;
795 pr_debug("setting policy failed\n");
796 if (cpufreq_driver->exit)
797 cpufreq_driver->exit(policy);
802 kobject_put(&policy->kobj);
803 wait_for_completion(&policy->kobj_unregister);
807 #ifdef CONFIG_HOTPLUG_CPU
808 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
811 struct cpufreq_policy *policy;
812 int ret = 0, has_target = !!cpufreq_driver->target;
815 policy = cpufreq_cpu_get(sibling);
819 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
821 lock_policy_rwsem_write(sibling);
823 write_lock_irqsave(&cpufreq_driver_lock, flags);
825 cpumask_set_cpu(cpu, policy->cpus);
826 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
827 per_cpu(cpufreq_cpu_data, cpu) = policy;
828 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
830 unlock_policy_rwsem_write(sibling);
833 __cpufreq_governor(policy, CPUFREQ_GOV_START);
834 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
837 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
839 cpufreq_cpu_put(policy);
848 * cpufreq_add_dev - add a CPU device
850 * Adds the cpufreq interface for a CPU device.
852 * The Oracle says: try running cpufreq registration/unregistration concurrently
853 * with cpu hotplugging and all hell will break loose. Tried to clean this
854 * mess up, but more thorough testing is needed. - Mathieu
856 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
858 unsigned int j, cpu = dev->id;
860 struct cpufreq_policy *policy;
862 #ifdef CONFIG_HOTPLUG_CPU
863 struct cpufreq_governor *gov;
867 if (cpu_is_offline(cpu))
870 pr_debug("adding CPU %u\n", cpu);
873 /* check whether a different CPU already registered this
874 * CPU because it is in the same boat. */
875 policy = cpufreq_cpu_get(cpu);
876 if (unlikely(policy)) {
877 cpufreq_cpu_put(policy);
881 #ifdef CONFIG_HOTPLUG_CPU
882 /* Check if this cpu was hot-unplugged earlier and has siblings */
883 read_lock_irqsave(&cpufreq_driver_lock, flags);
884 for_each_online_cpu(sibling) {
885 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
886 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
887 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
888 return cpufreq_add_policy_cpu(cpu, sibling, dev);
891 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
895 if (!try_module_get(cpufreq_driver->owner)) {
900 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
904 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
905 goto err_free_policy;
907 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
908 goto err_free_cpumask;
911 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
912 cpumask_copy(policy->cpus, cpumask_of(cpu));
914 /* Initially set CPU itself as the policy_cpu */
915 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
917 init_completion(&policy->kobj_unregister);
918 INIT_WORK(&policy->update, handle_update);
920 /* call driver. From then on the cpufreq must be able
921 * to accept all calls to ->verify and ->setpolicy for this CPU
923 ret = cpufreq_driver->init(policy);
925 pr_debug("initialization failed\n");
926 goto err_set_policy_cpu;
929 /* related cpus should at least have policy->cpus */
930 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
933 * affected cpus must always be the ones which are online. We aren't
934 * managing offline cpus here.
936 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
938 policy->user_policy.min = policy->min;
939 policy->user_policy.max = policy->max;
941 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
942 CPUFREQ_START, policy);
944 #ifdef CONFIG_HOTPLUG_CPU
945 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
947 policy->governor = gov;
948 pr_debug("Restoring governor %s for cpu %d\n",
949 policy->governor->name, cpu);
953 ret = cpufreq_add_dev_interface(cpu, policy, dev);
955 goto err_out_unregister;
957 kobject_uevent(&policy->kobj, KOBJ_ADD);
958 module_put(cpufreq_driver->owner);
959 pr_debug("initialization complete\n");
964 write_lock_irqsave(&cpufreq_driver_lock, flags);
965 for_each_cpu(j, policy->cpus)
966 per_cpu(cpufreq_cpu_data, j) = NULL;
967 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
969 kobject_put(&policy->kobj);
970 wait_for_completion(&policy->kobj_unregister);
973 per_cpu(cpufreq_policy_cpu, cpu) = -1;
974 free_cpumask_var(policy->related_cpus);
976 free_cpumask_var(policy->cpus);
980 module_put(cpufreq_driver->owner);
985 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
989 policy->last_cpu = policy->cpu;
992 for_each_cpu(j, policy->cpus)
993 per_cpu(cpufreq_policy_cpu, j) = cpu;
995 #ifdef CONFIG_CPU_FREQ_TABLE
996 cpufreq_frequency_table_update_policy_cpu(policy);
998 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
999 CPUFREQ_UPDATE_POLICY_CPU, policy);
1003 * __cpufreq_remove_dev - remove a CPU device
1005 * Removes the cpufreq interface for a CPU device.
1006 * Caller should already have policy_rwsem in write mode for this CPU.
1007 * This routine frees the rwsem before returning.
1009 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1011 unsigned int cpu = dev->id, ret, cpus;
1012 unsigned long flags;
1013 struct cpufreq_policy *data;
1014 struct kobject *kobj;
1015 struct completion *cmp;
1016 struct device *cpu_dev;
1018 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1020 write_lock_irqsave(&cpufreq_driver_lock, flags);
1022 data = per_cpu(cpufreq_cpu_data, cpu);
1023 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1025 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1028 pr_debug("%s: No cpu_data found\n", __func__);
1032 if (cpufreq_driver->target)
1033 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1035 #ifdef CONFIG_HOTPLUG_CPU
1036 if (!cpufreq_driver->setpolicy)
1037 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1038 data->governor->name, CPUFREQ_NAME_LEN);
1041 WARN_ON(lock_policy_rwsem_write(cpu));
1042 cpus = cpumask_weight(data->cpus);
1045 cpumask_clear_cpu(cpu, data->cpus);
1046 unlock_policy_rwsem_write(cpu);
1048 if (cpu != data->cpu) {
1049 sysfs_remove_link(&dev->kobj, "cpufreq");
1050 } else if (cpus > 1) {
1051 /* first sibling now owns the new sysfs dir */
1052 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1053 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1054 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1056 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1058 WARN_ON(lock_policy_rwsem_write(cpu));
1059 cpumask_set_cpu(cpu, data->cpus);
1061 write_lock_irqsave(&cpufreq_driver_lock, flags);
1062 per_cpu(cpufreq_cpu_data, cpu) = data;
1063 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1065 unlock_policy_rwsem_write(cpu);
1067 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1072 WARN_ON(lock_policy_rwsem_write(cpu));
1073 update_policy_cpu(data, cpu_dev->id);
1074 unlock_policy_rwsem_write(cpu);
1075 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1076 __func__, cpu_dev->id, cpu);
1079 if ((cpus == 1) && (cpufreq_driver->target))
1080 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1082 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1083 cpufreq_cpu_put(data);
1085 /* If cpu is last user of policy, free policy */
1087 lock_policy_rwsem_read(cpu);
1089 cmp = &data->kobj_unregister;
1090 unlock_policy_rwsem_read(cpu);
1093 /* we need to make sure that the underlying kobj is actually
1094 * not referenced anymore by anybody before we proceed with unloading.
1097 pr_debug("waiting for dropping of refcount\n");
1098 wait_for_completion(cmp);
1099 pr_debug("wait complete\n");
1101 if (cpufreq_driver->exit)
1102 cpufreq_driver->exit(data);
1104 free_cpumask_var(data->related_cpus);
1105 free_cpumask_var(data->cpus);
1107 } else if (cpufreq_driver->target) {
1108 __cpufreq_governor(data, CPUFREQ_GOV_START);
1109 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1112 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1117 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1119 unsigned int cpu = dev->id;
1122 if (cpu_is_offline(cpu))
1125 retval = __cpufreq_remove_dev(dev, sif);
1130 static void handle_update(struct work_struct *work)
1132 struct cpufreq_policy *policy =
1133 container_of(work, struct cpufreq_policy, update);
1134 unsigned int cpu = policy->cpu;
1135 pr_debug("handle_update for cpu %u called\n", cpu);
1136 cpufreq_update_policy(cpu);
1140 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
1142 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1143 * @new_freq: CPU frequency the CPU actually runs at
1145 * We adjust to the current frequency first, and need to clean up later,
1146 * so either call cpufreq_update_policy() or schedule handle_update().
1148 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1149 unsigned int new_freq)
1151 struct cpufreq_policy *policy;
1152 struct cpufreq_freqs freqs;
1153 unsigned long flags;
1156 pr_debug("Warning: CPU frequency out of sync: cpufreq core thinks it is "
1157 "%u kHz, but it is actually %u kHz.\n", old_freq, new_freq);
1159 freqs.old = old_freq;
1160 freqs.new = new_freq;
1162 read_lock_irqsave(&cpufreq_driver_lock, flags);
1163 policy = per_cpu(cpufreq_cpu_data, cpu);
1164 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1166 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1167 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1172 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1175 * This is the last known freq, without actually getting it from the driver.
1176 * Return value will be the same as what is shown in scaling_cur_freq in sysfs.
1178 unsigned int cpufreq_quick_get(unsigned int cpu)
1180 struct cpufreq_policy *policy;
1181 unsigned int ret_freq = 0;
1183 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1184 return cpufreq_driver->get(cpu);
1186 policy = cpufreq_cpu_get(cpu);
1188 ret_freq = policy->cur;
1189 cpufreq_cpu_put(policy);
1194 EXPORT_SYMBOL(cpufreq_quick_get);
1197 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1200 * Just return the max possible frequency for a given CPU.
1202 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1204 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1205 unsigned int ret_freq = 0;
1208 ret_freq = policy->max;
1209 cpufreq_cpu_put(policy);
1214 EXPORT_SYMBOL(cpufreq_quick_get_max);
1217 static unsigned int __cpufreq_get(unsigned int cpu)
1219 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1220 unsigned int ret_freq = 0;
1222 if (!cpufreq_driver->get)
1225 ret_freq = cpufreq_driver->get(cpu);
1227 if (ret_freq && policy->cur &&
1228 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1229 /* verify no discrepancy between actual and
1230 saved value exists */
1231 if (unlikely(ret_freq != policy->cur)) {
1232 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1233 schedule_work(&policy->update);
1241 * cpufreq_get - get the current CPU frequency (in kHz)
1244 * Get the current frequency of the CPU
1246 unsigned int cpufreq_get(unsigned int cpu)
1248 unsigned int ret_freq = 0;
1249 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1254 if (unlikely(lock_policy_rwsem_read(cpu)))
1257 ret_freq = __cpufreq_get(cpu);
1259 unlock_policy_rwsem_read(cpu);
1262 cpufreq_cpu_put(policy);
1266 EXPORT_SYMBOL(cpufreq_get);
1268 static struct subsys_interface cpufreq_interface = {
1270 .subsys = &cpu_subsys,
1271 .add_dev = cpufreq_add_dev,
1272 .remove_dev = cpufreq_remove_dev,
1277 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1279 * This function is only executed for the boot processor. The other CPUs
1280 * have been put offline by means of CPU hotplug.
1282 static int cpufreq_bp_suspend(void)
1286 int cpu = smp_processor_id();
1287 struct cpufreq_policy *cpu_policy;
1289 pr_debug("suspending cpu %u\n", cpu);
1291 /* If there's no policy for the boot CPU, we have nothing to do. */
1292 cpu_policy = cpufreq_cpu_get(cpu);
1296 if (cpufreq_driver->suspend) {
1297 ret = cpufreq_driver->suspend(cpu_policy);
1299 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1300 "step on CPU %u\n", cpu_policy->cpu);
1303 cpufreq_cpu_put(cpu_policy);
1308 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1310 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1311 * 2.) schedule a call to cpufreq_update_policy() ASAP once interrupts are
1312 * restored. It will verify that the current freq is in sync with
1313 * what we believe it to be. This is a bit later than when it
1314 * should be, but nonetheless it's better than calling
1315 * cpufreq_driver->get() here which might re-enable interrupts...
1317 * This function is only executed for the boot CPU. The other CPUs have not
1318 * been turned on yet.
1320 static void cpufreq_bp_resume(void)
1324 int cpu = smp_processor_id();
1325 struct cpufreq_policy *cpu_policy;
1327 pr_debug("resuming cpu %u\n", cpu);
1329 /* If there's no policy for the boot CPU, we have nothing to do. */
1330 cpu_policy = cpufreq_cpu_get(cpu);
1334 if (cpufreq_driver->resume) {
1335 ret = cpufreq_driver->resume(cpu_policy);
1337 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1338 "step on CPU %u\n", cpu_policy->cpu);
1343 schedule_work(&cpu_policy->update);
1346 cpufreq_cpu_put(cpu_policy);
1349 static struct syscore_ops cpufreq_syscore_ops = {
1350 .suspend = cpufreq_bp_suspend,
1351 .resume = cpufreq_bp_resume,
1355 * cpufreq_get_current_driver - return current driver's name
1357 * Return the name string of the currently loaded cpufreq driver
1360 const char *cpufreq_get_current_driver(void)
1363 return cpufreq_driver->name;
1367 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1369 /*********************************************************************
1370 * NOTIFIER LISTS INTERFACE *
1371 *********************************************************************/
1374 * cpufreq_register_notifier - register a driver with cpufreq
1375 * @nb: notifier function to register
1376 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1378 * Add a driver to one of two lists: either a list of drivers that
1379 * are notified about clock rate changes (once before and once after
1380 * the transition), or a list of drivers that are notified about
1381 * changes in cpufreq policy.
1383 * This function may sleep, and has the same return conditions as
1384 * blocking_notifier_chain_register.
1386 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1390 if (cpufreq_disabled())
1393 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1396 case CPUFREQ_TRANSITION_NOTIFIER:
1397 ret = srcu_notifier_chain_register(
1398 &cpufreq_transition_notifier_list, nb);
1400 case CPUFREQ_POLICY_NOTIFIER:
1401 ret = blocking_notifier_chain_register(
1402 &cpufreq_policy_notifier_list, nb);
1410 EXPORT_SYMBOL(cpufreq_register_notifier);
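/*
 * Illustrative client sketch (hypothetical module code, not part of this
 * file): a transition notifier is registered roughly like this:
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_transition_cb };
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */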
1414 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1415 * @nb: notifier block to be unregistered
1416 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1418 * Remove a driver from the CPU frequency notifier list.
1420 * This function may sleep, and has the same return conditions as
1421 * blocking_notifier_chain_unregister.
1423 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1427 if (cpufreq_disabled())
1431 case CPUFREQ_TRANSITION_NOTIFIER:
1432 ret = srcu_notifier_chain_unregister(
1433 &cpufreq_transition_notifier_list, nb);
1435 case CPUFREQ_POLICY_NOTIFIER:
1436 ret = blocking_notifier_chain_unregister(
1437 &cpufreq_policy_notifier_list, nb);
1445 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1448 /*********************************************************************
1450 *********************************************************************/
1453 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1454 unsigned int target_freq,
1455 unsigned int relation)
1457 int retval = -EINVAL;
1458 unsigned int old_target_freq = target_freq;
1460 if (cpufreq_disabled())
1463 /* Make sure that target_freq is within supported range */
1464 if (target_freq > policy->max)
1465 target_freq = policy->max;
1466 if (target_freq < policy->min)
1467 target_freq = policy->min;
1469 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1470 policy->cpu, target_freq, relation, old_target_freq);
1472 if (target_freq == policy->cur)
1475 if (cpufreq_driver->target)
1476 retval = cpufreq_driver->target(policy, target_freq, relation);
1480 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1482 int cpufreq_driver_target(struct cpufreq_policy *policy,
1483 unsigned int target_freq,
1484 unsigned int relation)
1488 policy = cpufreq_cpu_get(policy->cpu);
1492 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1495 ret = __cpufreq_driver_target(policy, target_freq, relation);
1497 unlock_policy_rwsem_write(policy->cpu);
1500 cpufreq_cpu_put(policy);
1504 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
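/*
 * Illustrative governor-side usage (an assumption, not from this file): a
 * governor that already holds the policy rwsem requests a new frequency
 * directly via the __ variant, e.g.
 *
 *	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *
 * relying on the clamping above to keep the request within the
 * policy->min..policy->max range.
 */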
1506 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1510 if (cpufreq_disabled())
1513 if (!cpufreq_driver->getavg)
1516 policy = cpufreq_cpu_get(policy->cpu);
1520 ret = cpufreq_driver->getavg(policy, cpu);
1522 cpufreq_cpu_put(policy);
1525 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1528 * when "event" is CPUFREQ_GOV_LIMITS
1531 static int __cpufreq_governor(struct cpufreq_policy *policy,
1536 /* gov only needs to be defined when the default governor is known to have
1537 latency restrictions, e.g. conservative or ondemand.
1538 That this is the case is already ensured in Kconfig
1540 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1541 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1543 struct cpufreq_governor *gov = NULL;
1546 if (policy->governor->max_transition_latency &&
1547 policy->cpuinfo.transition_latency >
1548 policy->governor->max_transition_latency) {
1552 printk(KERN_WARNING "%s governor failed: hardware transition latency"
1553 " too long, falling back"
1554 " to %s governor\n",
1555 policy->governor->name,
1557 policy->governor = gov;
1561 if (!try_module_get(policy->governor->owner))
1564 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1565 policy->cpu, event);
1566 ret = policy->governor->governor(policy, event);
1569 if (event == CPUFREQ_GOV_POLICY_INIT)
1570 policy->governor->initialized++;
1571 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1572 policy->governor->initialized--;
1575 /* we keep one module reference alive for
1576 each CPU governed by this policy */
1577 if ((event != CPUFREQ_GOV_START) || ret)
1578 module_put(policy->governor->owner);
1579 if ((event == CPUFREQ_GOV_STOP) && !ret)
1580 module_put(policy->governor->owner);
1586 int cpufreq_register_governor(struct cpufreq_governor *governor)
1593 if (cpufreq_disabled())
1596 mutex_lock(&cpufreq_governor_mutex);
1598 governor->initialized = 0;
1600 if (__find_governor(governor->name) == NULL) {
1602 list_add(&governor->governor_list, &cpufreq_governor_list);
1605 mutex_unlock(&cpufreq_governor_mutex);
1608 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1611 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1613 #ifdef CONFIG_HOTPLUG_CPU
1620 if (cpufreq_disabled())
1623 #ifdef CONFIG_HOTPLUG_CPU
1624 for_each_present_cpu(cpu) {
1625 if (cpu_online(cpu))
1627 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1628 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1632 mutex_lock(&cpufreq_governor_mutex);
1633 list_del(&governor->governor_list);
1634 mutex_unlock(&cpufreq_governor_mutex);
1637 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1641 /*********************************************************************
1642 * POLICY INTERFACE *
1643 *********************************************************************/
1646 * cpufreq_get_policy - get the current cpufreq_policy
1647 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1650 * Reads the current cpufreq policy.
1652 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1654 struct cpufreq_policy *cpu_policy;
1658 cpu_policy = cpufreq_cpu_get(cpu);
1662 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1664 cpufreq_cpu_put(cpu_policy);
1667 EXPORT_SYMBOL(cpufreq_get_policy);
1671 * data : current policy.
1672 * policy : policy to be set.
1674 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1675 struct cpufreq_policy *policy)
1677 int ret = 0, failed = 1;
1679 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1680 policy->min, policy->max);
1682 memcpy(&policy->cpuinfo, &data->cpuinfo,
1683 sizeof(struct cpufreq_cpuinfo));
1685 if (policy->min > data->max || policy->max < data->min) {
1690 /* verify the cpu speed can be set within this limit */
1691 ret = cpufreq_driver->verify(policy);
1695 /* adjust if necessary - all reasons */
1696 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1697 CPUFREQ_ADJUST, policy);
1699 /* adjust if necessary - hardware incompatibility*/
1700 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1701 CPUFREQ_INCOMPATIBLE, policy);
1703 /* verify the cpu speed can be set within this limit,
1704 which might be different from the first one */
1705 ret = cpufreq_driver->verify(policy);
1709 /* notification of the new policy */
1710 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1711 CPUFREQ_NOTIFY, policy);
1713 data->min = policy->min;
1714 data->max = policy->max;
1716 pr_debug("new min and max freqs are %u - %u kHz\n",
1717 data->min, data->max);
1719 if (cpufreq_driver->setpolicy) {
1720 data->policy = policy->policy;
1721 pr_debug("setting range\n");
1722 ret = cpufreq_driver->setpolicy(policy);
1724 if (policy->governor != data->governor) {
1725 /* save old, working values */
1726 struct cpufreq_governor *old_gov = data->governor;
1728 pr_debug("governor switch\n");
1730 /* end old governor */
1731 if (data->governor) {
1732 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1733 unlock_policy_rwsem_write(policy->cpu);
1734 __cpufreq_governor(data,
1735 CPUFREQ_GOV_POLICY_EXIT);
1736 lock_policy_rwsem_write(policy->cpu);
1739 /* start new governor */
1740 data->governor = policy->governor;
1741 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1742 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1745 unlock_policy_rwsem_write(policy->cpu);
1746 __cpufreq_governor(data,
1747 CPUFREQ_GOV_POLICY_EXIT);
1748 lock_policy_rwsem_write(policy->cpu);
1753 /* new governor failed, so re-start old one */
1754 pr_debug("starting governor %s failed\n",
1755 data->governor->name);
1757 data->governor = old_gov;
1758 __cpufreq_governor(data,
1759 CPUFREQ_GOV_POLICY_INIT);
1760 __cpufreq_governor(data,
1766 /* might be a policy change, too, so fall through */
1768 pr_debug("governor: change or update limits\n");
1769 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1777 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1778 * @cpu: CPU which shall be re-evaluated
1780 * Useful for policy notifiers which have different requirements
1781 * at different times.
1783 int cpufreq_update_policy(unsigned int cpu)
1785 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1786 struct cpufreq_policy policy;
1794 if (unlikely(lock_policy_rwsem_write(cpu))) {
1799 pr_debug("updating policy for CPU %u\n", cpu);
1800 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1801 policy.min = data->user_policy.min;
1802 policy.max = data->user_policy.max;
1803 policy.policy = data->user_policy.policy;
1804 policy.governor = data->user_policy.governor;
1806 /* BIOS might change freq behind our back
1807 -> ask driver for current freq and notify governors about a change */
1808 if (cpufreq_driver->get) {
1809 policy.cur = cpufreq_driver->get(cpu);
1811 pr_debug("Driver did not initialize current freq");
1812 data->cur = policy.cur;
1814 if (data->cur != policy.cur && cpufreq_driver->target)
1815 cpufreq_out_of_sync(cpu, data->cur,
1820 ret = __cpufreq_set_policy(data, &policy);
1822 unlock_policy_rwsem_write(cpu);
1825 cpufreq_cpu_put(data);
1829 EXPORT_SYMBOL(cpufreq_update_policy);
1831 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1832 unsigned long action, void *hcpu)
1834 unsigned int cpu = (unsigned long)hcpu;
1837 dev = get_cpu_device(cpu);
1841 cpufreq_add_dev(dev, NULL);
1843 case CPU_DOWN_PREPARE:
1844 case CPU_UP_CANCELED_FROZEN:
1845 __cpufreq_remove_dev(dev, NULL);
1847 case CPU_DOWN_FAILED:
1848 cpufreq_add_dev(dev, NULL);
1855 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1856 .notifier_call = cpufreq_cpu_callback,
1859 /*********************************************************************
1860 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1861 *********************************************************************/
1864 * cpufreq_register_driver - register a CPU Frequency driver
1865 * @driver_data: A struct cpufreq_driver containing the values
1866 * submitted by the CPU Frequency driver.
1868 * Registers a CPU Frequency driver to this core code. This code
1869 * returns zero on success, -EBUSY when another driver got here first
1870 * (and isn't unregistered in the meantime).
1873 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1875 unsigned long flags;
1878 if (cpufreq_disabled())
1881 if (!driver_data || !driver_data->verify || !driver_data->init ||
1882 ((!driver_data->setpolicy) && (!driver_data->target)))
1885 pr_debug("trying to register driver %s\n", driver_data->name);
1887 if (driver_data->setpolicy)
1888 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1890 write_lock_irqsave(&cpufreq_driver_lock, flags);
1891 if (cpufreq_driver) {
1892 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1895 cpufreq_driver = driver_data;
1896 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1898 ret = subsys_interface_register(&cpufreq_interface);
1900 goto err_null_driver;
1902 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1906 /* check for at least one working CPU */
1907 for (i = 0; i < nr_cpu_ids; i++)
1908 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1913 /* if all ->init() calls failed, unregister */
1915 pr_debug("no CPU initialized for driver %s\n",
1921 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1922 pr_debug("driver %s up and running\n", driver_data->name);
1926 subsys_interface_unregister(&cpufreq_interface);
1928 write_lock_irqsave(&cpufreq_driver_lock, flags);
1929 cpufreq_driver = NULL;
1930 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1933 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
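/*
 * Minimal registration sketch (hypothetical driver, all names made up):
 * a ->target() style driver fills in at least ->name, ->init, ->verify and
 * ->target, then registers itself from its module init:
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name	= "my-cpufreq",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *	};
 *
 *	static int __init my_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&my_cpufreq_driver);
 *	}
 */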
1937 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1939 * Unregister the current CPUFreq driver. Only call this if you have
1940 * the right to do so, i.e. if you have succeeded in initialising before!
1941 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1942 * currently not initialised.
1944 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1946 unsigned long flags;
1948 if (!cpufreq_driver || (driver != cpufreq_driver))
1951 pr_debug("unregistering driver %s\n", driver->name);
1953 subsys_interface_unregister(&cpufreq_interface);
1954 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1956 write_lock_irqsave(&cpufreq_driver_lock, flags);
1957 cpufreq_driver = NULL;
1958 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1962 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1964 static int __init cpufreq_core_init(void)
1968 if (cpufreq_disabled())
1971 for_each_possible_cpu(cpu) {
1972 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1973 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1976 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
1977 BUG_ON(!cpufreq_global_kobject);
1978 register_syscore_ops(&cpufreq_syscore_ops);
1982 core_initcall(cpufreq_core_init);