2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/notifier.h>
24 #include <linux/cpufreq.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/device.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/completion.h>
32 #include <linux/mutex.h>
33 #include <linux/syscore_ops.h>
34 #include <linux/suspend.h>
35 #include <linux/tick.h>
37 #include <trace/events/power.h>
40 * The "cpufreq driver" - the arch- or hardware-dependent low
41 * level driver of CPUFreq support, and its spinlock. This lock
42 * also protects the cpufreq_cpu_data array.
/* The registered low-level (arch/hardware) driver; NULL until one registers. */
44 static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy managing that CPU (siblings may share one). */
45 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
46 #ifdef CONFIG_HOTPLUG_CPU
47 /* This one keeps track of the previously set governor of a removed CPU */
48 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
/* Protects cpufreq_driver and the cpufreq_cpu_data pointers above. */
50 static DEFINE_RWLOCK(cpufreq_driver_lock);
/* NOTE(review): presumably serialises governor start/stop — confirm users. */
51 static DEFINE_MUTEX(cpufreq_governor_lock);
53 /* Flag to suspend/resume CPUFreq governors */
54 static bool cpufreq_suspended;
/* True when the driver scales via ->target (governor-based scaling). */
56 static inline bool has_target(void)
58 return cpufreq_driver->target;
62 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
63 * all cpufreq/hotplug/workqueue/etc related lock issues.
65 * The rules for this semaphore:
66 * - Any routine that wants to read from the policy structure will
67 * do a down_read on this semaphore.
68 * - Any routine that will write to the policy structure and/or may take away
69 * the policy altogether (eg. CPU hotplug), will hold this lock in write
70 * mode before doing so.
73 * - Governor routines that can be called in cpufreq hotplug path should not
74 * take this sem as top level hotplug notifier handler takes this.
75 * - Lock should not be held across
76 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
/* Owner CPU of each CPU's policy (-1 when unmanaged); used to redirect
 * sibling CPUs to the one rwsem that guards their shared policy. */
78 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
79 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
/* Generate lock_policy_rwsem_read()/lock_policy_rwsem_write(): down the
 * rwsem of the CPU that owns @cpu's policy (see cpufreq_policy_cpu). */
81 #define lock_policy_rwsem(mode, cpu) \
82 static int lock_policy_rwsem_##mode(int cpu) \
84 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
85 BUG_ON(policy_cpu == -1); \
86 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
91 lock_policy_rwsem(read, cpu);
92 lock_policy_rwsem(write, cpu);
/* Matching unlock helpers, generated the same way. */
94 #define unlock_policy_rwsem(mode, cpu) \
95 static void unlock_policy_rwsem_##mode(int cpu) \
97 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
98 BUG_ON(policy_cpu == -1); \
99 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
102 unlock_policy_rwsem(read, cpu);
103 unlock_policy_rwsem(write, cpu);
105 /* internal prototypes */
106 static int __cpufreq_governor(struct cpufreq_policy *policy,
108 static unsigned int __cpufreq_get(unsigned int cpu);
109 static void handle_update(struct work_struct *work);
112 * Two notifier lists: the "policy" list is involved in the
113 * validation process for a new CPU frequency policy; the
114 * "transition" list for kernel code that needs to handle
115 * changes to devices when the CPU clock speed changes.
116 * The mutex locks both lists.
/* "policy" notifiers validate new policies; "transition" notifiers are told
 * about actual frequency changes (SRCU-based, initialised below). */
118 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
119 static struct srcu_notifier_head cpufreq_transition_notifier_list;
/* Set once the SRCU head is initialised; WARNed on in register_notifier. */
121 static bool init_cpufreq_transition_notifier_list_called;
122 static int __init init_cpufreq_transition_notifier_list(void)
124 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
125 init_cpufreq_transition_notifier_list_called = true;
/* pure_initcall: run very early so the notifier head is usable ASAP. */
128 pure_initcall(init_cpufreq_transition_notifier_list);
/* Set by disable_cpufreq(); queried through cpufreq_disabled(). */
130 static int off __read_mostly;
131 static int cpufreq_disabled(void)
135 void disable_cpufreq(void)
/* All registered governors, protected by cpufreq_governor_mutex. */
139 static LIST_HEAD(cpufreq_governor_list);
140 static DEFINE_MUTEX(cpufreq_governor_mutex);
/* True when the driver asks for a governor instance per policy. */
142 bool have_governor_per_policy(void)
144 return cpufreq_driver->have_governor_per_policy;
/*
 * Look up @cpu's policy under cpufreq_driver_lock and take references:
 * a module ref on the driver owner and, unless @sysfs, a kobject ref on
 * the policy.  Error paths (elided here) return NULL after unwinding.
 */
147 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
149 struct cpufreq_policy *data;
152 if (cpu >= nr_cpu_ids)
155 /* get the cpufreq driver */
156 read_lock_irqsave(&cpufreq_driver_lock, flags);
161 if (!try_module_get(cpufreq_driver->owner))
166 data = per_cpu(cpufreq_cpu_data, cpu)
169 goto err_out_put_module;
171 if (!sysfs && !kobject_get(&data->kobj))
172 goto err_out_put_module;
174 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* error unwind: drop the module reference taken above */
178 module_put(cpufreq_driver->owner);
180 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Public lookup: takes both module and policy-kobject references. */
185 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
187 if (cpufreq_disabled())
190 return __cpufreq_cpu_get(cpu, false);
192 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/* sysfs-path lookup: skips the extra kobject_get (sysfs=true above). */
194 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
196 return __cpufreq_cpu_get(cpu, true);
/* Drop the references taken by __cpufreq_cpu_get(). */
199 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
202 kobject_put(&data->kobj);
203 module_put(cpufreq_driver->owner);
206 void cpufreq_cpu_put(struct cpufreq_policy *data)
208 if (cpufreq_disabled())
211 __cpufreq_cpu_put(data, false);
213 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
215 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
217 __cpufreq_cpu_put(data, true);
220 /*********************************************************************
221 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
222 *********************************************************************/
225 * adjust_jiffies - adjust the system "loops_per_jiffy"
227 * This function alters the system "loops_per_jiffy" for the clock
228 * speed change. Note that loops_per_jiffy cannot be updated on SMP
229 * systems as each CPU might be scaled differently. So, use the arch
230 * per-CPU loops_per_jiffy value wherever possible.
/* Reference loops_per_jiffy and the frequency at which it was sampled;
 * used to rescale the delay calibration after a frequency change. */
233 static unsigned long l_p_j_ref;
234 static unsigned int l_p_j_ref_freq;
236 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
/* Nothing to do when delay loops are frequency-independent. */
238 if (ci->flags & CPUFREQ_CONST_LOOPS)
/* First call: remember the baseline lpj/frequency pair. */
241 if (!l_p_j_ref_freq) {
242 l_p_j_ref = loops_per_jiffy;
243 l_p_j_ref_freq = ci->old;
244 pr_debug("saving %lu as reference value for loops_per_jiffy; "
245 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
/* Rescale lpj after a real change, or on suspend/resume transitions. */
247 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
248 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
249 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
251 pr_debug("scaling loops_per_jiffy to %lu "
252 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
/* !CONFIG_SMP (presumably) stub: lpj is not adjusted per-CPU here. */
256 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
/*
 * Fire the transition notifier chain and adjust_jiffies() for one stage
 * (@state = CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE) of a freq change.
 * Must be called with interrupts enabled (SRCU chain may sleep).
 */
263 void __cpufreq_notify_transition(struct cpufreq_policy *policy,
264 struct cpufreq_freqs *freqs, unsigned int state)
266 BUG_ON(irqs_disabled());
268 if (cpufreq_disabled())
271 freqs->flags = cpufreq_driver->flags;
272 pr_debug("notification %u of frequency transition to %u kHz\n",
277 case CPUFREQ_PRECHANGE:
278 /* detect if the driver reported a value as "old frequency"
279 * which is not equal to what the cpufreq core thinks is
282 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
283 if ((policy) && (policy->cpu == freqs->cpu) &&
284 (policy->cur) && (policy->cur != freqs->old)) {
285 pr_debug("Warning: CPU frequency is"
286 " %u, cpufreq assumed %u kHz.\n",
287 freqs->old, policy->cur);
/* trust the core's value over the driver's */
288 freqs->old = policy->cur;
291 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
292 CPUFREQ_PRECHANGE, freqs);
293 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
296 case CPUFREQ_POSTCHANGE:
297 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
298 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
299 (unsigned long)freqs->cpu);
300 trace_cpu_frequency(freqs->new, freqs->cpu);
301 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
302 CPUFREQ_POSTCHANGE, freqs);
/* record the new frequency as the policy's current one */
303 if (likely(policy) && likely(policy->cpu == freqs->cpu))
304 policy->cur = freqs->new;
309 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
310 * on frequency transition.
312 * This function calls the transition notifiers and the "adjust_jiffies"
313 * function. It is called twice on all CPU frequency changes that have
/* Notify once per CPU in the policy (freqs->cpu is rewritten per loop). */
316 void cpufreq_notify_transition(struct cpufreq_policy *policy,
317 struct cpufreq_freqs *freqs, unsigned int state)
319 for_each_cpu(freqs->cpu, policy->cpus)
320 __cpufreq_notify_transition(policy, freqs, state);
322 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
326 /*********************************************************************
328 *********************************************************************/
/* Find a registered governor by (case-insensitive) name; caller must hold
 * cpufreq_governor_mutex.  Returns NULL (elided) when not found. */
330 static struct cpufreq_governor *__find_governor(const char *str_governor)
332 struct cpufreq_governor *t;
334 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
335 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
342 * cpufreq_parse_governor - parse a governor string
/* For ->setpolicy drivers the string maps to a policy constant; for
 * ->target drivers it names a governor, modprobed on demand if absent. */
344 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
345 struct cpufreq_governor **governor)
352 if (cpufreq_driver->setpolicy) {
353 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
354 *policy = CPUFREQ_POLICY_PERFORMANCE;
356 } else if (!strnicmp(str_governor, "powersave",
358 *policy = CPUFREQ_POLICY_POWERSAVE;
361 } else if (cpufreq_driver->target) {
362 struct cpufreq_governor *t;
364 mutex_lock(&cpufreq_governor_mutex);
366 t = __find_governor(str_governor);
/* not registered yet: drop the lock, try to load the module, retry */
371 mutex_unlock(&cpufreq_governor_mutex);
372 ret = request_module("cpufreq_%s", str_governor);
373 mutex_lock(&cpufreq_governor_mutex);
376 t = __find_governor(str_governor);
384 mutex_unlock(&cpufreq_governor_mutex);
392 * cpufreq_per_cpu_attr_read() / show_##file_name() -
393 * print out cpufreq information
395 * Write out information from cpufreq_driver->policy[cpu]; object must be
/* Generate a sysfs show routine printing one unsigned policy member. */
399 #define show_one(file_name, object) \
400 static ssize_t show_##file_name \
401 (struct cpufreq_policy *policy, char *buf) \
403 return sprintf(buf, "%u\n", policy->object); \
406 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
407 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
408 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
409 show_one(scaling_min_freq, min);
410 show_one(scaling_max_freq, max);
411 show_one(scaling_cur_freq, cur);
/* Forward declaration: used by the store_one-generated writers below. */
413 static int __cpufreq_set_policy(struct cpufreq_policy *data,
414 struct cpufreq_policy *policy);
417 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
/* Generate a sysfs store routine: parse one %u, copy the current policy,
 * update the member and apply it through __cpufreq_set_policy(). */
419 #define store_one(file_name, object) \
420 static ssize_t store_##file_name \
421 (struct cpufreq_policy *policy, const char *buf, size_t count) \
424 struct cpufreq_policy new_policy; \
426 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
430 ret = sscanf(buf, "%u", &new_policy.object); \
434 ret = __cpufreq_set_policy(policy, &new_policy); \
435 policy->user_policy.object = policy->object; \
437 return ret ? ret : count; \
440 store_one(scaling_min_freq, min);
441 store_one(scaling_max_freq, max);
444 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
/* Queries the driver via __cpufreq_get(); "<unknown>" when unavailable. */
446 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
449 unsigned int cur_freq = __cpufreq_get(policy->cpu);
451 return sprintf(buf, "<unknown>");
452 return sprintf(buf, "%u\n", cur_freq);
457 * show_scaling_governor - show the current policy for the specified CPU
/* setpolicy drivers report the fixed policy name; otherwise the governor. */
459 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
461 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
462 return sprintf(buf, "powersave\n");
463 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
464 return sprintf(buf, "performance\n");
465 else if (policy->governor)
466 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
467 policy->governor->name);
473 * store_scaling_governor - store policy for the specified CPU
/* Parse the requested governor name and switch to it via
 * __cpufreq_set_policy(); on success the user_policy is updated too. */
475 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
476 const char *buf, size_t count)
479 char str_governor[16];
480 struct cpufreq_policy new_policy;
482 ret = cpufreq_get_policy(&new_policy, policy->cpu);
486 ret = sscanf(buf, "%15s", str_governor);
490 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
491 &new_policy.governor))
494 /* Do not use cpufreq_set_policy here or the user_policy.max
495 will be wrongly overridden */
496 ret = __cpufreq_set_policy(policy, &new_policy);
498 policy->user_policy.policy = policy->policy;
499 policy->user_policy.governor = policy->governor;
508 * show_scaling_driver - show the cpufreq driver currently loaded
510 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
512 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
516 * show_scaling_available_governors - show the available CPUfreq governors
/* Without ->target only the two fixed policies apply; otherwise list every
 * registered governor, bounded so the names always fit in one page. */
518 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
522 struct cpufreq_governor *t;
524 if (!cpufreq_driver->target) {
525 i += sprintf(buf, "performance powersave");
529 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
530 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
531 - (CPUFREQ_NAME_LEN + 2)))
533 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
536 i += sprintf(&buf[i], "\n");
/* Render a cpumask as a space-separated CPU list, capped at PAGE_SIZE. */
540 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
545 for_each_cpu(cpu, mask) {
547 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
548 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
549 if (i >= (PAGE_SIZE - 5))
552 i += sprintf(&buf[i], "\n");
557 * show_related_cpus - show the CPUs affected by each transition even if
558 * hw coordination is in use
560 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
562 return show_cpus(policy->related_cpus, buf);
566 * show_affected_cpus - show the CPUs affected by each transition
568 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
570 return show_cpus(policy->cpus, buf);
/* Pass a user-requested speed to the governor (userspace governor hook). */
573 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
574 const char *buf, size_t count)
576 unsigned int freq = 0;
579 if (!policy->governor || !policy->governor->store_setspeed)
582 ret = sscanf(buf, "%u", &freq);
586 policy->governor->store_setspeed(policy, freq);
591 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
593 if (!policy->governor || !policy->governor->show_setspeed)
594 return sprintf(buf, "<unsupported>\n");
596 return policy->governor->show_setspeed(policy, buf);
600 * show_bios_limit - show the current cpufreq HW/BIOS limitation
/* Ask the driver for the BIOS cap; fall back to cpuinfo.max_freq. */
602 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
606 if (cpufreq_driver->bios_limit) {
607 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
609 return sprintf(buf, "%u\n", limit);
611 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
/* sysfs attribute definitions binding the show_*/store_* handlers above.
 * cpuinfo_cur_freq is root-readable only (0400). */
614 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
615 cpufreq_freq_attr_ro(cpuinfo_min_freq);
616 cpufreq_freq_attr_ro(cpuinfo_max_freq);
617 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
618 cpufreq_freq_attr_ro(scaling_available_governors);
619 cpufreq_freq_attr_ro(scaling_driver);
620 cpufreq_freq_attr_ro(scaling_cur_freq);
621 cpufreq_freq_attr_ro(bios_limit);
622 cpufreq_freq_attr_ro(related_cpus);
623 cpufreq_freq_attr_ro(affected_cpus);
624 cpufreq_freq_attr_rw(scaling_min_freq);
625 cpufreq_freq_attr_rw(scaling_max_freq);
626 cpufreq_freq_attr_rw(scaling_governor);
627 cpufreq_freq_attr_rw(scaling_setspeed);
/* Attributes created for every policy kobject by default; driver- or
 * state-dependent ones (cur_freq, bios_limit, ...) are added separately. */
629 static struct attribute *default_attrs[] = {
630 &cpuinfo_min_freq.attr,
631 &cpuinfo_max_freq.attr,
632 &cpuinfo_transition_latency.attr,
633 &scaling_min_freq.attr,
634 &scaling_max_freq.attr,
637 &scaling_governor.attr,
638 &scaling_driver.attr,
639 &scaling_available_governors.attr,
640 &scaling_setspeed.attr,
/* /sys/devices/system/cpu/cpufreq parent kobject, shared by governors. */
644 struct kobject *cpufreq_global_kobject;
645 EXPORT_SYMBOL(cpufreq_global_kobject);
/* Recover the policy / freq_attr from the embedded kobject/attribute. */
647 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
648 #define to_attr(a) container_of(a, struct freq_attr, attr)
/*
 * Generic sysfs read dispatcher: pin the policy (sysfs variant, no extra
 * kobject ref), take its rwsem for reading, then call the attr's ->show.
 */
650 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
652 struct cpufreq_policy *policy = to_policy(kobj);
653 struct freq_attr *fattr = to_attr(attr);
654 ssize_t ret = -EINVAL;
655 policy = cpufreq_cpu_get_sysfs(policy->cpu);
659 if (lock_policy_rwsem_read(policy->cpu) < 0)
663 ret = fattr->show(policy, buf);
667 unlock_policy_rwsem_read(policy->cpu);
669 cpufreq_cpu_put_sysfs(policy);
/*
 * Generic sysfs write dispatcher: same pattern as show(), but with the
 * policy rwsem held for writing around the attr's ->store.
 */
674 static ssize_t store(struct kobject *kobj, struct attribute *attr,
675 const char *buf, size_t count)
677 struct cpufreq_policy *policy = to_policy(kobj);
678 struct freq_attr *fattr = to_attr(attr);
679 ssize_t ret = -EINVAL;
680 policy = cpufreq_cpu_get_sysfs(policy->cpu);
684 if (lock_policy_rwsem_write(policy->cpu) < 0)
688 ret = fattr->store(policy, buf, count);
692 unlock_policy_rwsem_write(policy->cpu);
694 cpufreq_cpu_put_sysfs(policy);
/* kobject release: wake whoever waits on kobj_unregister to free policy. */
699 static void cpufreq_sysfs_release(struct kobject *kobj)
701 struct cpufreq_policy *policy = to_policy(kobj);
702 pr_debug("last reference is dropped\n");
703 complete(&policy->kobj_unregister);
706 static const struct sysfs_ops sysfs_ops = {
/* kobj_type for policy kobjects: ops, default attrs, release hook. */
711 static struct kobj_type ktype_cpufreq = {
712 .sysfs_ops = &sysfs_ops,
713 .default_attrs = default_attrs,
714 .release = cpufreq_sysfs_release,
717 /* symlink affected CPUs */
/* Create "cpufreq" symlinks from each sibling CPU's device directory to
 * the single policy kobject that manages them. */
718 static int cpufreq_add_dev_symlink(unsigned int cpu,
719 struct cpufreq_policy *policy)
724 for_each_cpu(j, policy->cpus) {
725 struct cpufreq_policy *managed_policy;
726 struct device *cpu_dev;
731 pr_debug("CPU %u already managed, adding link\n", j);
/* take a ref for the lifetime of the symlink */
732 managed_policy = cpufreq_cpu_get(cpu);
733 cpu_dev = get_cpu_device(j);
734 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
737 cpufreq_cpu_put(managed_policy);
/*
 * Register the policy's sysfs interface: init the kobject under the CPU
 * device, create driver/optional attribute files, publish the policy in
 * the per-CPU tables, add sibling symlinks and apply the initial policy.
 */
744 static int cpufreq_add_dev_interface(unsigned int cpu,
745 struct cpufreq_policy *policy,
748 struct cpufreq_policy new_policy;
749 struct freq_attr **drv_attr;
754 /* prepare interface data */
755 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
756 &dev->kobj, "cpufreq");
760 /* set up files for this cpu device */
761 drv_attr = cpufreq_driver->attr;
762 while ((drv_attr) && (*drv_attr)) {
763 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
765 goto err_out_kobj_put;
/* cpuinfo_cur_freq only makes sense when the driver can read hw */
768 if (cpufreq_driver->get) {
769 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
771 goto err_out_kobj_put;
773 if (cpufreq_driver->target) {
774 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
776 goto err_out_kobj_put;
778 if (cpufreq_driver->bios_limit) {
779 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
781 goto err_out_kobj_put;
/* publish the policy for all managed CPUs under the writer lock */
784 write_lock_irqsave(&cpufreq_driver_lock, flags);
785 for_each_cpu(j, policy->cpus) {
786 per_cpu(cpufreq_cpu_data, j) = policy;
787 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
789 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
791 ret = cpufreq_add_dev_symlink(cpu, policy);
793 goto err_out_kobj_put;
795 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
796 /* assure that the starting sequence is run in __cpufreq_set_policy */
797 policy->governor = NULL;
799 /* set default policy */
800 ret = __cpufreq_set_policy(policy, &new_policy);
801 policy->user_policy.policy = policy->policy;
802 policy->user_policy.governor = policy->governor;
805 pr_debug("setting policy failed\n");
806 if (cpufreq_driver->exit)
807 cpufreq_driver->exit(policy);
/* error unwind: drop the kobject and wait for its release callback */
812 kobject_put(&policy->kobj);
813 wait_for_completion(&policy->kobj_unregister);
817 #ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hotplugged CPU to the existing policy of @sibling: stop the
 * governor, add the CPU to the policy's mask/tables under the writer
 * lock, restart the governor and link the CPU's sysfs dir to the policy.
 */
818 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
821 struct cpufreq_policy *policy;
822 int ret = 0, has_target = !!cpufreq_driver->target;
825 policy = cpufreq_cpu_get(sibling);
829 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
831 lock_policy_rwsem_write(sibling);
833 write_lock_irqsave(&cpufreq_driver_lock, flags);
835 cpumask_set_cpu(cpu, policy->cpus);
836 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
837 per_cpu(cpufreq_cpu_data, cpu) = policy;
838 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
840 unlock_policy_rwsem_write(sibling);
843 __cpufreq_governor(policy, CPUFREQ_GOV_START);
844 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
847 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
849 cpufreq_cpu_put(policy);
858 * cpufreq_add_dev - add a CPU device
860 * Adds the cpufreq interface for a CPU device.
862 * The Oracle says: try running cpufreq registration/unregistration concurrently
863 * with cpu hotplugging and all hell will break loose. Tried to clean this
864 * mess up, but more thorough testing is needed. - Mathieu
866 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
868 unsigned int j, cpu = dev->id;
870 struct cpufreq_policy *policy;
872 #ifdef CONFIG_HOTPLUG_CPU
873 struct cpufreq_governor *gov;
/* offline CPUs get their interface when they come online */
877 if (cpu_is_offline(cpu))
880 pr_debug("adding CPU %u\n", cpu);
883 /* check whether a different CPU already registered this
884 * CPU because it is in the same boat. */
885 policy = cpufreq_cpu_get(cpu);
886 if (unlikely(policy)) {
887 cpufreq_cpu_put(policy);
891 #ifdef CONFIG_HOTPLUG_CPU
892 /* Check if this cpu was hot-unplugged earlier and has siblings */
893 read_lock_irqsave(&cpufreq_driver_lock, flags);
894 for_each_online_cpu(sibling) {
895 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
896 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
897 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* join the sibling's existing policy instead of creating one */
898 return cpufreq_add_policy_cpu(cpu, sibling, dev);
901 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* hold the driver module for the lifetime of this registration */
905 if (!try_module_get(cpufreq_driver->owner)) {
910 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
914 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
915 goto err_free_policy;
917 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
918 goto err_free_cpumask;
921 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
922 cpumask_copy(policy->cpus, cpumask_of(cpu));
924 /* Initially set CPU itself as the policy_cpu */
925 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
927 init_completion(&policy->kobj_unregister);
928 INIT_WORK(&policy->update, handle_update);
930 /* call driver. From then on the cpufreq must be able
931 * to accept all calls to ->verify and ->setpolicy for this CPU
933 ret = cpufreq_driver->init(policy);
935 pr_debug("initialization failed\n");
936 goto err_set_policy_cpu;
939 /* related cpus should atleast have policy->cpus */
940 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
943 * affected cpus must always be the one, which are online. We aren't
944 * managing offline cpus here.
946 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
948 policy->user_policy.min = policy->min;
949 policy->user_policy.max = policy->max;
951 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
952 CPUFREQ_START, policy);
954 #ifdef CONFIG_HOTPLUG_CPU
/* restore the governor this CPU used before it was hot-removed */
955 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
957 policy->governor = gov;
958 pr_debug("Restoring governor %s for cpu %d\n",
959 policy->governor->name, cpu);
963 ret = cpufreq_add_dev_interface(cpu, policy, dev);
965 goto err_out_unregister;
967 kobject_uevent(&policy->kobj, KOBJ_ADD);
968 module_put(cpufreq_driver->owner);
969 pr_debug("initialization complete\n");
/* error unwind: unpublish, free the kobject, then the masks/policy */
974 write_lock_irqsave(&cpufreq_driver_lock, flags);
975 for_each_cpu(j, policy->cpus)
976 per_cpu(cpufreq_cpu_data, j) = NULL;
977 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
979 kobject_put(&policy->kobj);
980 wait_for_completion(&policy->kobj_unregister);
983 per_cpu(cpufreq_policy_cpu, cpu) = -1;
984 free_cpumask_var(policy->related_cpus);
986 free_cpumask_var(policy->cpus);
990 module_put(cpufreq_driver->owner);
/*
 * Make @cpu the owner of @policy: remember the previous owner in
 * last_cpu, repoint every managed CPU's policy_cpu entry, and notify
 * the freq-table code and policy notifiers of the ownership change.
 */
995 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
999 policy->last_cpu = policy->cpu;
1002 for_each_cpu(j, policy->cpus)
1003 per_cpu(cpufreq_policy_cpu, j) = cpu;
1005 #ifdef CONFIG_CPU_FREQ_TABLE
1006 cpufreq_frequency_table_update_policy_cpu(policy);
1008 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1009 CPUFREQ_UPDATE_POLICY_CPU, policy);
1013 * __cpufreq_remove_dev - remove a CPU device
1015 * Removes the cpufreq interface for a CPU device.
1016 * Caller should already have policy_rwsem in write mode for this CPU.
1017 * This routine frees the rwsem before returning.
1019 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1021 unsigned int cpu = dev->id, ret, cpus;
1022 unsigned long flags;
1023 struct cpufreq_policy *data;
1024 struct kobject *kobj;
1025 struct completion *cmp;
1026 struct device *cpu_dev;
1028 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
/* unpublish this CPU's policy pointer first */
1030 write_lock_irqsave(&cpufreq_driver_lock, flags);
1032 data = per_cpu(cpufreq_cpu_data, cpu);
1033 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1035 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1038 pr_debug("%s: No cpu_data found\n", __func__);
1042 if (cpufreq_driver->target)
1043 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1045 #ifdef CONFIG_HOTPLUG_CPU
/* remember the governor so it can be restored on re-plug */
1046 if (!cpufreq_driver->setpolicy)
1047 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1048 data->governor->name, CPUFREQ_NAME_LEN);
1051 WARN_ON(lock_policy_rwsem_write(cpu));
1052 cpus = cpumask_weight(data->cpus);
1055 cpumask_clear_cpu(cpu, data->cpus);
1056 unlock_policy_rwsem_write(cpu);
1058 if (cpu != data->cpu) {
/* a non-owner CPU only holds a symlink to the policy dir */
1059 sysfs_remove_link(&dev->kobj, "cpufreq");
1060 } else if (cpus > 1) {
1061 /* first sibling now owns the new sysfs dir */
1062 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1063 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1064 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1066 pr_err("%s: Failed to move kobj: %d", __func__, ret);
/* move failed: put this CPU back into the policy and relink */
1068 WARN_ON(lock_policy_rwsem_write(cpu));
1069 cpumask_set_cpu(cpu, data->cpus);
1071 write_lock_irqsave(&cpufreq_driver_lock, flags);
1072 per_cpu(cpufreq_cpu_data, cpu) = data;
1073 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1075 unlock_policy_rwsem_write(cpu);
1077 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
/* hand policy ownership to the sibling that now hosts the dir */
1082 WARN_ON(lock_policy_rwsem_write(cpu));
1083 update_policy_cpu(data, cpu_dev->id);
1084 unlock_policy_rwsem_write(cpu);
1085 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1086 __func__, cpu_dev->id, cpu);
1089 /* If cpu is last user of policy, free policy */
1091 if (cpufreq_driver->target)
1092 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1094 lock_policy_rwsem_read(cpu);
1096 cmp = &data->kobj_unregister;
1097 unlock_policy_rwsem_read(cpu);
1100 /* we need to make sure that the underlying kobj is actually
1101 * not referenced anymore by anybody before we proceed with
1104 pr_debug("waiting for dropping of refcount\n");
1105 wait_for_completion(cmp);
1106 pr_debug("wait complete\n");
1108 if (cpufreq_driver->exit)
1109 cpufreq_driver->exit(data);
1111 free_cpumask_var(data->related_cpus);
1112 free_cpumask_var(data->cpus);
/* other CPUs still use the policy: drop our ref, restart governor */
1115 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1116 cpufreq_cpu_put(data);
1117 if (cpufreq_driver->target) {
1118 __cpufreq_governor(data, CPUFREQ_GOV_START);
1119 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1123 per_cpu(cpufreq_policy_cpu, cpu) = -1;
/* subsys_interface remove hook: only tears down for still-online CPUs. */
1128 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1130 unsigned int cpu = dev->id;
1133 if (cpu_is_offline(cpu))
1136 retval = __cpufreq_remove_dev(dev, sif);
/* Deferred-work handler behind policy->update: re-evaluate the policy. */
1141 static void handle_update(struct work_struct *work)
1143 struct cpufreq_policy *policy =
1144 container_of(work, struct cpufreq_policy, update);
1145 unsigned int cpu = policy->cpu;
1146 pr_debug("handle_update for cpu %u called\n", cpu);
1147 cpufreq_update_policy(cpu);
1151 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1153 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1154 * @new_freq: CPU frequency the CPU actually runs at
1156 * We adjust to current frequency first, and need to clean up later.
1157 * So either call to cpufreq_update_policy() or schedule handle_update()).
1159 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1160 unsigned int new_freq)
1162 struct cpufreq_policy *policy;
1163 struct cpufreq_freqs freqs;
1164 unsigned long flags;
1167 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1168 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
/* fabricate a transition from the stale value to the measured one */
1170 freqs.old = old_freq;
1171 freqs.new = new_freq;
1173 read_lock_irqsave(&cpufreq_driver_lock, flags);
1174 policy = per_cpu(cpufreq_cpu_data, cpu);
1175 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* run both stages so notifiers and policy->cur catch up */
1177 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1178 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1183 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1186 * This is the last known freq, without actually getting it from the driver.
1187 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1189 unsigned int cpufreq_quick_get(unsigned int cpu)
1191 struct cpufreq_policy *policy;
1192 unsigned int ret_freq = 0;
/* setpolicy drivers track freq themselves: ask the hardware directly */
1194 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1195 return cpufreq_driver->get(cpu);
1197 policy = cpufreq_cpu_get(cpu);
1199 ret_freq = policy->cur;
1200 cpufreq_cpu_put(policy);
1205 EXPORT_SYMBOL(cpufreq_quick_get);
1208 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1211 * Just return the max possible frequency for a given CPU.
1213 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1215 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1216 unsigned int ret_freq = 0;
1219 ret_freq = policy->max;
1220 cpufreq_cpu_put(policy);
1225 EXPORT_SYMBOL(cpufreq_quick_get_max);
/* Read the current frequency from the driver; if it disagrees with
 * policy->cur, resync notifiers now and schedule a policy update. */
1228 static unsigned int __cpufreq_get(unsigned int cpu)
1230 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1231 unsigned int ret_freq = 0;
1233 if (!cpufreq_driver->get)
1236 ret_freq = cpufreq_driver->get(cpu);
1238 if (ret_freq && policy->cur &&
1239 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1240 /* verify no discrepancy between actual and
1241 saved value exists */
1242 if (unlikely(ret_freq != policy->cur)) {
1243 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1244 schedule_work(&policy->update);
1252 * cpufreq_get - get the current CPU frequency (in kHz)
1255 * Get the CPU current (static) CPU frequency
/* Locked wrapper: pins the policy and takes its rwsem for reading. */
1257 unsigned int cpufreq_get(unsigned int cpu)
1259 unsigned int ret_freq = 0;
1260 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1265 if (unlikely(lock_policy_rwsem_read(cpu)))
1268 ret_freq = __cpufreq_get(cpu);
1270 unlock_policy_rwsem_read(cpu);
1273 cpufreq_cpu_put(policy);
1277 EXPORT_SYMBOL(cpufreq_get);
/* Hook add/remove of CPU devices into the cpu subsystem. */
1279 static struct subsys_interface cpufreq_interface = {
1281 .subsys = &cpu_subsys,
1282 .add_dev = cpufreq_add_dev,
1283 .remove_dev = cpufreq_remove_dev,
1288 * cpufreq_suspend() - Suspend CPUFreq governors
1290 * Called during system wide Suspend/Hibernate cycles for suspending governors
1291 * as some platforms can't change frequency after this point in suspend cycle.
1292 * Because some of the devices (like: i2c, regulators, etc) they use for
1293 * changing frequency are suspended quickly after this point.
1295 void cpufreq_suspend(void)
1297 struct cpufreq_policy *policy;
1300 if (!cpufreq_driver)
1306 pr_debug("%s: Suspending Governors\n", __func__);
1308 for_each_possible_cpu(cpu) {
1309 if (!cpu_online(cpu))
1312 policy = cpufreq_cpu_get(cpu);
/* stop the governor, then give the driver its suspend callback */
1314 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1315 pr_err("%s: Failed to stop governor for policy: %p\n",
1317 else if (cpufreq_driver->suspend
1318 && cpufreq_driver->suspend(policy))
1319 pr_err("%s: Failed to suspend driver: %p\n", __func__,
/* from here on governor callbacks are short-circuited */
1323 cpufreq_suspended = true;
1327 * cpufreq_resume() - Resume CPUFreq governors
1329 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1330 * are suspended with cpufreq_suspend().
1332 void cpufreq_resume(void)
1334 struct cpufreq_policy *policy;
1337 if (!cpufreq_driver)
1343 pr_debug("%s: Resuming Governors\n", __func__);
/* re-enable governor callbacks before restarting them */
1345 cpufreq_suspended = false;
1347 for_each_possible_cpu(cpu) {
1348 if (!cpu_online(cpu))
1351 policy = cpufreq_cpu_get(cpu);
1353 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1354 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1355 pr_err("%s: Failed to start governor for policy: %p\n",
1357 else if (cpufreq_driver->resume
1358 && cpufreq_driver->resume(policy))
1359 pr_err("%s: Failed to resume driver: %p\n", __func__,
1363 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
1364 * policy in list. It will verify that the current freq is in
1365 * sync with what we believe it to be.
1368 schedule_work(&policy->update);
1373 * cpufreq_get_current_driver - return current driver's name
1375 * Return the name string of the currently loaded cpufreq driver
1378 const char *cpufreq_get_current_driver(void)
/* NOTE(review): a NULL-driver guard presumably exists at the elided
 * lines 1379-1380 -- confirm against the full source before relying
 * on this being safe with no driver registered. */
1381 return cpufreq_driver->name;
1385 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1387 /*********************************************************************
1388 * NOTIFIER LISTS INTERFACE *
1389 *********************************************************************/
1392 * cpufreq_register_notifier - register a driver with cpufreq
1393 * @nb: notifier function to register
1394 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1396 * Add a driver to one of two lists: either a list of drivers that
1397 * are notified about clock rate changes (once before and once after
1398 * the transition), or a list of drivers that are notified about
1399 * changes in cpufreq policy.
1401 * This function may sleep, and has the same return conditions as
1402 * blocking_notifier_chain_register.
1404 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1408 if (cpufreq_disabled())
/* The transition list must be initialized before anyone registers on it. */
1411 WARN_ON(!init_cpufreq_transition_notifier_list_called);
/* Transition notifiers use an SRCU chain (callable from atomic-ish
 * transition paths); policy notifiers use a plain blocking chain. */
1414 case CPUFREQ_TRANSITION_NOTIFIER:
1415 ret = srcu_notifier_chain_register(
1416 &cpufreq_transition_notifier_list, nb);
1418 case CPUFREQ_POLICY_NOTIFIER:
1419 ret = blocking_notifier_chain_register(
1420 &cpufreq_policy_notifier_list, nb);
1428 EXPORT_SYMBOL(cpufreq_register_notifier);
1432 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1433 * @nb: notifier block to be unregistered
1434 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1436 * Remove a driver from the CPU frequency notifier list.
1438 * This function may sleep, and has the same return conditions as
1439 * blocking_notifier_chain_unregister.
1441 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1445 if (cpufreq_disabled())
/* Mirror of cpufreq_register_notifier(): pick the matching chain type. */
1449 case CPUFREQ_TRANSITION_NOTIFIER:
1450 ret = srcu_notifier_chain_unregister(
1451 &cpufreq_transition_notifier_list, nb);
1453 case CPUFREQ_POLICY_NOTIFIER:
1454 ret = blocking_notifier_chain_unregister(
1455 &cpufreq_policy_notifier_list, nb);
1463 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1466 /*********************************************************************
1468 *********************************************************************/
/*
 * Ask the driver to move @policy's CPU to @target_freq (clamped to the
 * policy's min/max) honouring @relation. Caller must hold the policy
 * rwsem for writing -- see cpufreq_driver_target() for the locked wrapper.
 */
1471 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1472 unsigned int target_freq,
1473 unsigned int relation)
1475 int retval = -EINVAL;
/* Keep the unclamped request only for the debug message below. */
1476 unsigned int old_target_freq = target_freq;
1478 if (cpufreq_disabled())
1481 /* Make sure that target_freq is within supported range */
1482 if (target_freq > policy->max)
1483 target_freq = policy->max;
1484 if (target_freq < policy->min)
1485 target_freq = policy->min;
1487 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1488 policy->cpu, target_freq, relation, old_target_freq);
/* Already at the requested frequency: nothing to do. */
1490 if (target_freq == policy->cur)
1493 if (cpufreq_driver->target)
1494 retval = cpufreq_driver->target(policy, target_freq, relation);
1498 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * Locked wrapper around __cpufreq_driver_target(): takes a policy
 * reference and the per-CPU policy rwsem (write) around the call.
 */
1500 int cpufreq_driver_target(struct cpufreq_policy *policy,
1501 unsigned int target_freq,
1502 unsigned int relation)
/* Re-look-up the policy to pin a reference for the duration of the call. */
1506 policy = cpufreq_cpu_get(policy->cpu);
1510 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1513 ret = __cpufreq_driver_target(policy, target_freq, relation);
1515 unlock_policy_rwsem_write(policy->cpu);
1518 cpufreq_cpu_put(policy);
1522 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * Query the driver's average-frequency estimate for @cpu, if the driver
 * provides a ->getavg callback. Holds a policy reference over the call.
 */
1524 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1528 if (cpufreq_disabled())
/* Optional callback: bail out (elided path) when the driver lacks it. */
1531 if (!cpufreq_driver->getavg)
1534 policy = cpufreq_cpu_get(policy->cpu)
1538 ret = cpufreq_driver->getavg(policy, cpu);
1540 cpufreq_cpu_put(policy);
1543 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * Dispatch a governor event (START/STOP/LIMITS/POLICY_INIT/POLICY_EXIT)
 * to @policy's governor, maintaining the governor_enabled flag, the
 * governor module refcount and the ->initialized counter.
 *
 * NOTE(review): elided listing -- braces, returns and several statements
 * between the visible lines are missing from this view.
 */
1546 * when "event" is CPUFREQ_GOV_LIMITS
1549 static int __cpufreq_governor(struct cpufreq_policy *policy,
1554 /* Only must be defined when default governor is known to have latency
1555 restrictions, like e.g. conservative or ondemand.
1556 That this is the case is already ensured in Kconfig
/* Fallback governor used when the requested one can't meet the HW's
 * transition latency: performance if built in, otherwise none. */
1558 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1559 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1561 struct cpufreq_governor *gov = NULL;
1564 /* Don't start any governor operations if we are entering suspend */
1565 if (cpufreq_suspended)
/* Governor declares a max transition latency it tolerates; if the HW is
 * slower than that, fall back to 'gov' (when available). */
1568 if (policy->governor->max_transition_latency &&
1569 policy->cpuinfo.transition_latency >
1570 policy->governor->max_transition_latency) {
1574 printk(KERN_WARNING "%s governor failed, too long"
1575 " transition latency of HW, fallback"
1576 " to %s governor\n",
1577 policy->governor->name,
1579 policy->governor = gov;
/* Pin the governor module while we operate on it. */
1583 if (!try_module_get(policy->governor->owner))
1586 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1587 policy->cpu, event);
/* Reject redundant transitions: STOP when already stopped, START when
 * already started. governor_enabled is protected by cpufreq_governor_lock. */
1589 mutex_lock(&cpufreq_governor_lock);
1590 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1591 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1592 mutex_unlock(&cpufreq_governor_lock);
/* Optimistically flip the enabled flag before calling the governor... */
1596 if (event == CPUFREQ_GOV_STOP)
1597 policy->governor_enabled = false;
1598 else if (event == CPUFREQ_GOV_START)
1599 policy->governor_enabled = true;
1601 mutex_unlock(&cpufreq_governor_lock);
1603 ret = policy->governor->governor(policy, event);
/* Success path (elided branch): track how many policies initialized us. */
1606 if (event == CPUFREQ_GOV_POLICY_INIT)
1607 policy->governor->initialized++;
1608 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1609 policy->governor->initialized--;
/* ...and roll the flag back if the governor callback failed. */
1611 /* Restore original values */
1612 mutex_lock(&cpufreq_governor_lock);
1613 if (event == CPUFREQ_GOV_STOP)
1614 policy->governor_enabled = true;
1615 else if (event == CPUFREQ_GOV_START)
1616 policy->governor_enabled = false;
1617 mutex_unlock(&cpufreq_governor_lock);
/* Refcount invariant: keep exactly one module ref per started policy.
 * Drop ours unless this was a successful START; drop the held one on
 * a successful STOP. */
1620 /* we keep one module reference alive for
1621 each CPU governed by this CPU */
1622 if ((event != CPUFREQ_GOV_START) || ret)
1623 module_put(policy->governor->owner);
1624 if ((event == CPUFREQ_GOV_STOP) && !ret)
1625 module_put(policy->governor->owner);
/*
 * Add @governor to the global governor list, unless a governor of the
 * same name is already registered (duplicate path elided from view).
 */
1631 int cpufreq_register_governor(struct cpufreq_governor *governor)
1638 if (cpufreq_disabled())
1641 mutex_lock(&cpufreq_governor_mutex);
/* Fresh governor: no policy has initialized it yet. */
1643 governor->initialized = 0;
1645 if (__find_governor(governor->name) == NULL) {
1647 list_add(&governor->governor_list, &cpufreq_governor_list);
1650 mutex_unlock(&cpufreq_governor_mutex);
1653 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * Remove @governor from the global list. On hotplug kernels, first wipe
 * any per-CPU "last governor" records naming it, so an offline CPU can't
 * come back asking for a governor that no longer exists.
 */
1656 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1658 #ifdef CONFIG_HOTPLUG_CPU
1665 if (cpufreq_disabled())
1668 #ifdef CONFIG_HOTPLUG_CPU
/* Only offline CPUs keep a saved governor name (online ones have a live
 * policy); clear entries that match the departing governor. */
1669 for_each_present_cpu(cpu) {
1670 if (cpu_online(cpu))
1672 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1673 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1677 mutex_lock(&cpufreq_governor_mutex);
1678 list_del(&governor->governor_list);
1679 mutex_unlock(&cpufreq_governor_mutex);
1682 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1686 /*********************************************************************
1687 * POLICY INTERFACE *
1688 *********************************************************************/
1691 * cpufreq_get_policy - get the current cpufreq_policy
1692 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1695 * Reads the current cpufreq policy.
1697 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1699 struct cpufreq_policy *cpu_policy;
/* Pin the live policy, snapshot it into the caller's buffer, release. */
1703 cpu_policy = cpufreq_cpu_get(cpu);
1707 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1709 cpufreq_cpu_put(cpu_policy);
1712 EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * Apply a new policy (@policy) on top of the live one (@data): validate
 * limits with the driver, run the policy notifier chain, commit min/max,
 * then either call the driver's ->setpolicy or perform a governor
 * stop/exit/init/start switch (with rollback to the old governor on
 * failure). Caller holds the policy rwsem for writing.
 *
 * NOTE(review): elided listing -- error labels, braces and several
 * statements between the visible lines are missing from this view.
 */
1716 * data : current policy.
1717 * policy : policy to be set.
1719 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1720 struct cpufreq_policy *policy)
1722 int ret = 0, failed = 1;
1724 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1725 policy->min, policy->max);
/* cpuinfo (HW limits) always comes from the live policy, not the caller. */
1727 memcpy(&policy->cpuinfo, &data->cpuinfo,
1728 sizeof(struct cpufreq_cpuinfo));
/* Reject a requested range that doesn't overlap the current one. */
1730 if (policy->min > data->max || policy->max < data->min) {
1735 /* verify the cpu speed can be set within this limit */
1736 ret = cpufreq_driver->verify(policy);
1740 /* adjust if necessary - all reasons */
1741 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1742 CPUFREQ_ADJUST, policy);
1744 /* adjust if necessary - hardware incompatibility*/
1745 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1746 CPUFREQ_INCOMPATIBLE, policy);
1748 /* verify the cpu speed can be set within this limit,
1749 which might be different to the first one */
1750 ret = cpufreq_driver->verify(policy);
1754 /* notification of the new policy */
1755 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1756 CPUFREQ_NOTIFY, policy);
/* Commit the (possibly notifier-adjusted) limits to the live policy. */
1758 data->min = policy->min;
1759 data->max = policy->max;
1761 pr_debug("new min and max freqs are %u - %u kHz\n",
1762 data->min, data->max);
/* setpolicy drivers manage frequency themselves; no governor involved. */
1764 if (cpufreq_driver->setpolicy) {
1765 data->policy = policy->policy;
1766 pr_debug("setting range\n");
1767 ret = cpufreq_driver->setpolicy(policy);
1769 if (policy->governor != data->governor) {
1770 /* save old, working values */
1771 struct cpufreq_governor *old_gov = data->governor;
1773 pr_debug("governor switch\n");
1775 /* end old governor */
1776 if (data->governor) {
1777 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
/* POLICY_EXIT may sleep/take locks the governor needs; drop the rwsem
 * around it to avoid deadlock, then retake it. */
1778 unlock_policy_rwsem_write(policy->cpu);
1779 __cpufreq_governor(data,
1780 CPUFREQ_GOV_POLICY_EXIT);
1781 lock_policy_rwsem_write(policy->cpu);
1784 /* start new governor */
1785 data->governor = policy->governor;
1786 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1787 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
/* START failed after INIT succeeded: tear the new governor back down
 * (again dropping the rwsem around POLICY_EXIT). */
1790 unlock_policy_rwsem_write(policy->cpu);
1791 __cpufreq_governor(data,
1792 CPUFREQ_GOV_POLICY_EXIT);
1793 lock_policy_rwsem_write(policy->cpu);
1798 /* new governor failed, so re-start old one */
1799 pr_debug("starting governor %s failed\n",
1800 data->governor->name);
1802 data->governor = old_gov;
1803 __cpufreq_governor(data,
1804 CPUFREQ_GOV_POLICY_INIT);
1805 __cpufreq_governor(data,
1811 /* might be a policy change, too, so fall through */
1813 pr_debug("governor: change or update limits\n");
1814 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1822 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1823 * @cpu: CPU which shall be re-evaluated
1825 * Useful for policy notifiers which have different necessities
1826 * at different times.
1828 int cpufreq_update_policy(unsigned int cpu)
1830 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
/* Stack copy: the candidate policy we rebuild from user_policy below. */
1831 struct cpufreq_policy policy;
1839 if (unlikely(lock_policy_rwsem_write(cpu))) {
1844 pr_debug("updating policy for CPU %u\n", cpu);
/* Start from the live policy, then restore the user-requested bounds
 * and governor so notifiers re-evaluate against the user's intent. */
1845 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1846 policy.min = data->user_policy.min;
1847 policy.max = data->user_policy.max;
1848 policy.policy = data->user_policy.policy;
1849 policy.governor = data->user_policy.governor;
1851 /* BIOS might change freq behind our back
1852 -> ask driver for current freq and notify governors about a change */
1853 if (cpufreq_driver->get) {
1854 policy.cur = cpufreq_driver->get(cpu);
1856 pr_debug("Driver did not initialize current freq");
1857 data->cur = policy.cur;
/* Only target-style drivers need the out-of-sync fixup/notification. */
1859 if (data->cur != policy.cur && cpufreq_driver->target)
1860 cpufreq_out_of_sync(cpu, data->cur,
1865 ret = __cpufreq_set_policy(data, &policy);
1867 unlock_policy_rwsem_write(cpu);
1870 cpufreq_cpu_put(data);
1874 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * CPU hotplug callback: mirror CPU life-cycle events onto the cpufreq
 * device interface -- add the policy on online (and on a failed
 * down), remove it when the CPU is about to go down.
 */
1876 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1877 unsigned long action, void *hcpu)
1879 unsigned int cpu = (unsigned long)hcpu;
1882 dev = get_cpu_device(cpu);
1886 case CPU_ONLINE_FROZEN:
1887 cpufreq_add_dev(dev, NULL);
1889 case CPU_DOWN_PREPARE:
1890 case CPU_DOWN_PREPARE_FROZEN:
1891 __cpufreq_remove_dev(dev, NULL);
/* Down was aborted: re-add what DOWN_PREPARE removed. */
1893 case CPU_DOWN_FAILED:
1894 case CPU_DOWN_FAILED_FROZEN:
1895 cpufreq_add_dev(dev, NULL);
1902 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1903 .notifier_call = cpufreq_cpu_callback,
1906 /*********************************************************************
1907 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1908 *********************************************************************/
1911 * cpufreq_register_driver - register a CPU Frequency driver
1912 * @driver_data: A struct cpufreq_driver containing the values#
1913 * submitted by the CPU Frequency driver.
1915 * Registers a CPU Frequency driver to this core code. This code
1916 * returns zero on success, -EBUSY when another driver got here first
1917 * (and isn't unregistered in the meantime).
1920 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1922 unsigned long flags;
1925 if (cpufreq_disabled())
/* A driver must provide verify+init and exactly one of setpolicy/target. */
1928 if (!driver_data || !driver_data->verify || !driver_data->init ||
1929 ((!driver_data->setpolicy) && (!driver_data->target)))
1932 pr_debug("trying to register driver %s\n", driver_data->name);
/* setpolicy drivers change freq behind our back, so loops_per_jiffy
 * recalibration is pointless -- mark them CONST_LOOPS. */
1934 if (driver_data->setpolicy)
1935 driver_data->flags |= CPUFREQ_CONST_LOOPS;
/* Only one driver at a time: install under the driver rwlock. */
1937 write_lock_irqsave(&cpufreq_driver_lock, flags);
1938 if (cpufreq_driver) {
1939 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1942 cpufreq_driver = driver_data;
1943 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* This triggers cpufreq_add_dev() (and thus driver ->init) per CPU. */
1945 ret = subsys_interface_register(&cpufreq_interface);
1947 goto err_null_driver;
/* Non-sticky drivers must manage at least one CPU, else back out. */
1949 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1953 /* check for at least one working CPU */
1954 for (i = 0; i < nr_cpu_ids; i++)
1955 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1960 /* if all ->init() calls failed, unregister */
1962 pr_debug("no CPU initialized for driver %s\n",
1968 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1969 pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwind (labels elided): undo subsys registration, then clear
 * the global driver pointer under the lock. */
1973 subsys_interface_unregister(&cpufreq_interface);
1975 write_lock_irqsave(&cpufreq_driver_lock, flags);
1976 cpufreq_driver = NULL;
1977 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1980 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1984 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1986 * Unregister the current CPUFreq driver. Only call this if you have
1987 * the right to do so, i.e. if you have succeeded in initialising before!
1988 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1989 * currently not initialised.
1991 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1993 unsigned long flags;
/* Only the currently-registered driver may unregister itself. */
1995 if (!cpufreq_driver || (driver != cpufreq_driver))
1998 pr_debug("unregistering driver %s\n", driver->name);
/* Tear down in reverse order of cpufreq_register_driver(). */
2000 subsys_interface_unregister(&cpufreq_interface);
2001 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2003 write_lock_irqsave(&cpufreq_driver_lock, flags);
2004 cpufreq_driver = NULL;
2005 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2009 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Core initcall: set up the per-CPU policy bookkeeping (no policy yet,
 * hence -1) and create the global /sys/devices/system/cpu/cpufreq kobject.
 */
2011 static int __init cpufreq_core_init(void)
2015 if (cpufreq_disabled())
2018 for_each_possible_cpu(cpu) {
2019 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2020 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
/* Boot cannot proceed sanely without the sysfs anchor -- hence BUG_ON. */
2023 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2024 BUG_ON(!cpufreq_global_kobject);
2028 core_initcall(cpufreq_core_init);