2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
/* Currently registered low-level driver (NULL when none). */
static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy that manages each CPU (NULL if unmanaged). */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* Policy pointers saved across light-weight (suspend/resume) tear-down. */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
/* Protects cpufreq_driver and both per-CPU arrays above. */
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);
/* All currently active policies are linked here. */
static LIST_HEAD(cpufreq_policy_list);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
/* NOTE(review): the matching #endif is not visible in this extract. */
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69 #define lock_policy_rwsem(mode, cpu) \
70 static void lock_policy_rwsem_##mode(int cpu) \
72 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
74 down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
77 lock_policy_rwsem(read, cpu);
78 lock_policy_rwsem(write, cpu);
80 #define unlock_policy_rwsem(mode, cpu) \
81 static void unlock_policy_rwsem_##mode(int cpu) \
83 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
85 up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
88 unlock_policy_rwsem(read, cpu);
89 unlock_policy_rwsem(write, cpu);
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections.
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
104 * Two notifier lists: the "policy" list is involved in the
105 * validation process for a new CPU frequency policy; the
106 * "transition" list for kernel code that needs to handle
107 * changes to devices when the CPU clock speed changes.
108 * The mutex locks both lists.
110 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
111 static struct srcu_notifier_head cpufreq_transition_notifier_list;
113 static bool init_cpufreq_transition_notifier_list_called;
114 static int __init init_cpufreq_transition_notifier_list(void)
116 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
117 init_cpufreq_transition_notifier_list_called = true;
120 pure_initcall(init_cpufreq_transition_notifier_list);
122 static int off __read_mostly;
123 static int cpufreq_disabled(void)
127 void disable_cpufreq(void)
131 static LIST_HEAD(cpufreq_governor_list);
132 static DEFINE_MUTEX(cpufreq_governor_mutex);
134 bool have_governor_per_policy(void)
136 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
138 EXPORT_SYMBOL_GPL(have_governor_per_policy);
140 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
142 if (have_governor_per_policy())
143 return &policy->kobj;
145 return cpufreq_global_kobject;
147 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
149 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
155 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
157 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
158 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
159 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
164 idle_time = cur_wall_time - busy_time;
166 *wall = cputime_to_usecs(cur_wall_time);
168 return cputime_to_usecs(idle_time);
171 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
173 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
175 if (idle_time == -1ULL)
176 return get_cpu_idle_time_jiffy(cpu, wall);
178 idle_time += get_cpu_iowait_time_us(cpu, wall);
182 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
185 * This is a generic cpufreq init() routine which can be used by cpufreq
186 * drivers of SMP systems. It will do following:
187 * - validate & show freq table passed
188 * - set policies transition latency
189 * - policy->cpus with all possible CPUs
191 int cpufreq_generic_init(struct cpufreq_policy *policy,
192 struct cpufreq_frequency_table *table,
193 unsigned int transition_latency)
197 ret = cpufreq_table_validate_and_show(policy, table);
199 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
203 policy->cpuinfo.transition_latency = transition_latency;
206 * The driver only supports the SMP configuartion where all processors
207 * share the clock and voltage and clock.
209 cpumask_setall(policy->cpus);
213 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
215 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
217 struct cpufreq_policy *policy = NULL;
220 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
223 if (!down_read_trylock(&cpufreq_rwsem))
226 /* get the cpufreq driver */
227 read_lock_irqsave(&cpufreq_driver_lock, flags);
229 if (cpufreq_driver) {
231 policy = per_cpu(cpufreq_cpu_data, cpu);
233 kobject_get(&policy->kobj);
236 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
239 up_read(&cpufreq_rwsem);
243 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
245 void cpufreq_cpu_put(struct cpufreq_policy *policy)
247 if (cpufreq_disabled())
250 kobject_put(&policy->kobj);
251 up_read(&cpufreq_rwsem);
253 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
255 /*********************************************************************
256 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
257 *********************************************************************/
260 * adjust_jiffies - adjust the system "loops_per_jiffy"
262 * This function alters the system "loops_per_jiffy" for the clock
263 * speed change. Note that loops_per_jiffy cannot be updated on SMP
264 * systems as each CPU might be scaled differently. So, use the arch
265 * per-CPU loops_per_jiffy value wherever possible.
268 static unsigned long l_p_j_ref;
269 static unsigned int l_p_j_ref_freq;
271 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
273 if (ci->flags & CPUFREQ_CONST_LOOPS)
276 if (!l_p_j_ref_freq) {
277 l_p_j_ref = loops_per_jiffy;
278 l_p_j_ref_freq = ci->old;
279 pr_debug("saving %lu as reference value for loops_per_jiffy; "
280 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
282 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
283 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
284 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
286 pr_debug("scaling loops_per_jiffy to %lu "
287 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
291 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
297 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
298 struct cpufreq_freqs *freqs, unsigned int state)
300 BUG_ON(irqs_disabled());
302 if (cpufreq_disabled())
305 freqs->flags = cpufreq_driver->flags;
306 pr_debug("notification %u of frequency transition to %u kHz\n",
311 case CPUFREQ_PRECHANGE:
312 /* detect if the driver reported a value as "old frequency"
313 * which is not equal to what the cpufreq core thinks is
316 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
317 if ((policy) && (policy->cpu == freqs->cpu) &&
318 (policy->cur) && (policy->cur != freqs->old)) {
319 pr_debug("Warning: CPU frequency is"
320 " %u, cpufreq assumed %u kHz.\n",
321 freqs->old, policy->cur);
322 freqs->old = policy->cur;
325 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
326 CPUFREQ_PRECHANGE, freqs);
327 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
330 case CPUFREQ_POSTCHANGE:
331 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
332 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
333 (unsigned long)freqs->cpu);
334 trace_cpu_frequency(freqs->new, freqs->cpu);
335 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
336 CPUFREQ_POSTCHANGE, freqs);
337 if (likely(policy) && likely(policy->cpu == freqs->cpu))
338 policy->cur = freqs->new;
344 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
345 * on frequency transition.
347 * This function calls the transition notifiers and the "adjust_jiffies"
348 * function. It is called twice on all CPU frequency changes that have
351 void cpufreq_notify_transition(struct cpufreq_policy *policy,
352 struct cpufreq_freqs *freqs, unsigned int state)
354 for_each_cpu(freqs->cpu, policy->cpus)
355 __cpufreq_notify_transition(policy, freqs, state);
357 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
360 /*********************************************************************
362 *********************************************************************/
364 static struct cpufreq_governor *__find_governor(const char *str_governor)
366 struct cpufreq_governor *t;
368 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
369 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
376 * cpufreq_parse_governor - parse a governor string
378 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
379 struct cpufreq_governor **governor)
386 if (cpufreq_driver->setpolicy) {
387 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
388 *policy = CPUFREQ_POLICY_PERFORMANCE;
390 } else if (!strnicmp(str_governor, "powersave",
392 *policy = CPUFREQ_POLICY_POWERSAVE;
395 } else if (cpufreq_driver->target) {
396 struct cpufreq_governor *t;
398 mutex_lock(&cpufreq_governor_mutex);
400 t = __find_governor(str_governor);
405 mutex_unlock(&cpufreq_governor_mutex);
406 ret = request_module("cpufreq_%s", str_governor);
407 mutex_lock(&cpufreq_governor_mutex);
410 t = __find_governor(str_governor);
418 mutex_unlock(&cpufreq_governor_mutex);
425 * cpufreq_per_cpu_attr_read() / show_##file_name() -
426 * print out cpufreq information
428 * Write out information from cpufreq_driver->policy[cpu]; object must be
432 #define show_one(file_name, object) \
433 static ssize_t show_##file_name \
434 (struct cpufreq_policy *policy, char *buf) \
436 return sprintf(buf, "%u\n", policy->object); \
439 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
440 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
441 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
442 show_one(scaling_min_freq, min);
443 show_one(scaling_max_freq, max);
444 show_one(scaling_cur_freq, cur);
446 static int cpufreq_set_policy(struct cpufreq_policy *policy,
447 struct cpufreq_policy *new_policy);
450 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
452 #define store_one(file_name, object) \
453 static ssize_t store_##file_name \
454 (struct cpufreq_policy *policy, const char *buf, size_t count) \
457 struct cpufreq_policy new_policy; \
459 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
463 ret = sscanf(buf, "%u", &new_policy.object); \
467 ret = cpufreq_set_policy(policy, &new_policy); \
468 policy->user_policy.object = policy->object; \
470 return ret ? ret : count; \
473 store_one(scaling_min_freq, min);
474 store_one(scaling_max_freq, max);
477 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
479 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
482 unsigned int cur_freq = __cpufreq_get(policy->cpu);
484 return sprintf(buf, "<unknown>");
485 return sprintf(buf, "%u\n", cur_freq);
489 * show_scaling_governor - show the current policy for the specified CPU
491 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
493 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
494 return sprintf(buf, "powersave\n");
495 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
496 return sprintf(buf, "performance\n");
497 else if (policy->governor)
498 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
499 policy->governor->name);
504 * store_scaling_governor - store policy for the specified CPU
506 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
507 const char *buf, size_t count)
510 char str_governor[16];
511 struct cpufreq_policy new_policy;
513 ret = cpufreq_get_policy(&new_policy, policy->cpu);
517 ret = sscanf(buf, "%15s", str_governor);
521 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
522 &new_policy.governor))
525 ret = cpufreq_set_policy(policy, &new_policy);
527 policy->user_policy.policy = policy->policy;
528 policy->user_policy.governor = policy->governor;
537 * show_scaling_driver - show the cpufreq driver currently loaded
539 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
541 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
545 * show_scaling_available_governors - show the available CPUfreq governors
547 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
551 struct cpufreq_governor *t;
553 if (!cpufreq_driver->target) {
554 i += sprintf(buf, "performance powersave");
558 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
559 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
560 - (CPUFREQ_NAME_LEN + 2)))
562 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
565 i += sprintf(&buf[i], "\n");
569 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
574 for_each_cpu(cpu, mask) {
576 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
577 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
578 if (i >= (PAGE_SIZE - 5))
581 i += sprintf(&buf[i], "\n");
584 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
587 * show_related_cpus - show the CPUs affected by each transition even if
588 * hw coordination is in use
590 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
592 return cpufreq_show_cpus(policy->related_cpus, buf);
596 * show_affected_cpus - show the CPUs affected by each transition
598 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
600 return cpufreq_show_cpus(policy->cpus, buf);
603 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
604 const char *buf, size_t count)
606 unsigned int freq = 0;
609 if (!policy->governor || !policy->governor->store_setspeed)
612 ret = sscanf(buf, "%u", &freq);
616 policy->governor->store_setspeed(policy, freq);
621 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
623 if (!policy->governor || !policy->governor->show_setspeed)
624 return sprintf(buf, "<unsupported>\n");
626 return policy->governor->show_setspeed(policy, buf);
630 * show_bios_limit - show the current cpufreq HW/BIOS limitation
632 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
636 if (cpufreq_driver->bios_limit) {
637 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
639 return sprintf(buf, "%u\n", limit);
641 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
644 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
645 cpufreq_freq_attr_ro(cpuinfo_min_freq);
646 cpufreq_freq_attr_ro(cpuinfo_max_freq);
647 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
648 cpufreq_freq_attr_ro(scaling_available_governors);
649 cpufreq_freq_attr_ro(scaling_driver);
650 cpufreq_freq_attr_ro(scaling_cur_freq);
651 cpufreq_freq_attr_ro(bios_limit);
652 cpufreq_freq_attr_ro(related_cpus);
653 cpufreq_freq_attr_ro(affected_cpus);
654 cpufreq_freq_attr_rw(scaling_min_freq);
655 cpufreq_freq_attr_rw(scaling_max_freq);
656 cpufreq_freq_attr_rw(scaling_governor);
657 cpufreq_freq_attr_rw(scaling_setspeed);
659 static struct attribute *default_attrs[] = {
660 &cpuinfo_min_freq.attr,
661 &cpuinfo_max_freq.attr,
662 &cpuinfo_transition_latency.attr,
663 &scaling_min_freq.attr,
664 &scaling_max_freq.attr,
667 &scaling_governor.attr,
668 &scaling_driver.attr,
669 &scaling_available_governors.attr,
670 &scaling_setspeed.attr,
674 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
675 #define to_attr(a) container_of(a, struct freq_attr, attr)
677 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
679 struct cpufreq_policy *policy = to_policy(kobj);
680 struct freq_attr *fattr = to_attr(attr);
683 if (!down_read_trylock(&cpufreq_rwsem))
686 lock_policy_rwsem_read(policy->cpu);
689 ret = fattr->show(policy, buf);
693 unlock_policy_rwsem_read(policy->cpu);
694 up_read(&cpufreq_rwsem);
699 static ssize_t store(struct kobject *kobj, struct attribute *attr,
700 const char *buf, size_t count)
702 struct cpufreq_policy *policy = to_policy(kobj);
703 struct freq_attr *fattr = to_attr(attr);
704 ssize_t ret = -EINVAL;
708 if (!cpu_online(policy->cpu))
711 if (!down_read_trylock(&cpufreq_rwsem))
714 lock_policy_rwsem_write(policy->cpu);
717 ret = fattr->store(policy, buf, count);
721 unlock_policy_rwsem_write(policy->cpu);
723 up_read(&cpufreq_rwsem);
730 static void cpufreq_sysfs_release(struct kobject *kobj)
732 struct cpufreq_policy *policy = to_policy(kobj);
733 pr_debug("last reference is dropped\n");
734 complete(&policy->kobj_unregister);
737 static const struct sysfs_ops sysfs_ops = {
742 static struct kobj_type ktype_cpufreq = {
743 .sysfs_ops = &sysfs_ops,
744 .default_attrs = default_attrs,
745 .release = cpufreq_sysfs_release,
748 struct kobject *cpufreq_global_kobject;
749 EXPORT_SYMBOL(cpufreq_global_kobject);
751 static int cpufreq_global_kobject_usage;
753 int cpufreq_get_global_kobject(void)
755 if (!cpufreq_global_kobject_usage++)
756 return kobject_add(cpufreq_global_kobject,
757 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
761 EXPORT_SYMBOL(cpufreq_get_global_kobject);
763 void cpufreq_put_global_kobject(void)
765 if (!--cpufreq_global_kobject_usage)
766 kobject_del(cpufreq_global_kobject);
768 EXPORT_SYMBOL(cpufreq_put_global_kobject);
770 int cpufreq_sysfs_create_file(const struct attribute *attr)
772 int ret = cpufreq_get_global_kobject();
775 ret = sysfs_create_file(cpufreq_global_kobject, attr);
777 cpufreq_put_global_kobject();
782 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
784 void cpufreq_sysfs_remove_file(const struct attribute *attr)
786 sysfs_remove_file(cpufreq_global_kobject, attr);
787 cpufreq_put_global_kobject();
789 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
791 /* symlink affected CPUs */
792 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
797 for_each_cpu(j, policy->cpus) {
798 struct device *cpu_dev;
800 if (j == policy->cpu)
803 pr_debug("Adding link for CPU: %u\n", j);
804 cpu_dev = get_cpu_device(j);
805 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
813 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
816 struct freq_attr **drv_attr;
819 /* prepare interface data */
820 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
821 &dev->kobj, "cpufreq");
825 /* set up files for this cpu device */
826 drv_attr = cpufreq_driver->attr;
827 while ((drv_attr) && (*drv_attr)) {
828 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
830 goto err_out_kobj_put;
833 if (cpufreq_driver->get) {
834 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
836 goto err_out_kobj_put;
838 if (cpufreq_driver->target) {
839 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
841 goto err_out_kobj_put;
843 if (cpufreq_driver->bios_limit) {
844 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
846 goto err_out_kobj_put;
849 ret = cpufreq_add_dev_symlink(policy);
851 goto err_out_kobj_put;
856 kobject_put(&policy->kobj);
857 wait_for_completion(&policy->kobj_unregister);
861 static void cpufreq_init_policy(struct cpufreq_policy *policy)
863 struct cpufreq_policy new_policy;
866 memcpy(&new_policy, policy, sizeof(*policy));
867 /* assure that the starting sequence is run in cpufreq_set_policy */
868 policy->governor = NULL;
870 /* set default policy */
871 ret = cpufreq_set_policy(policy, &new_policy);
872 policy->user_policy.policy = policy->policy;
873 policy->user_policy.governor = policy->governor;
876 pr_debug("setting policy failed\n");
877 if (cpufreq_driver->exit)
878 cpufreq_driver->exit(policy);
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach @cpu to an existing sibling @policy: stop the governor, add the CPU
 * to the policy's mask, restart the governor and (unless @frozen) create the
 * per-CPU sysfs symlink.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	if (has_target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	lock_policy_rwsem_write(policy->cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(policy->cpu);

	if (has_target) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	/* Don't touch sysfs links during light-weight init */
	if (!frozen)
		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
924 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
926 struct cpufreq_policy *policy;
929 read_lock_irqsave(&cpufreq_driver_lock, flags);
931 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
933 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
938 static struct cpufreq_policy *cpufreq_policy_alloc(void)
940 struct cpufreq_policy *policy;
942 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
946 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
947 goto err_free_policy;
949 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
950 goto err_free_cpumask;
952 INIT_LIST_HEAD(&policy->policy_list);
956 free_cpumask_var(policy->cpus);
963 static void cpufreq_policy_free(struct cpufreq_policy *policy)
965 free_cpumask_var(policy->related_cpus);
966 free_cpumask_var(policy->cpus);
970 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
972 if (WARN_ON(cpu == policy->cpu))
976 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
977 * Also lock for last cpu is enough here as contention will happen only
978 * after policy->cpu is changed and after it is changed, other threads
979 * will try to acquire lock for new cpu. And policy is already updated
982 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
984 policy->last_cpu = policy->cpu;
987 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
989 cpufreq_frequency_table_update_policy_cpu(policy);
990 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
991 CPUFREQ_UPDATE_POLICY_CPU, policy);
/*
 * __cpufreq_add_dev - bring CPU @dev under cpufreq management, either by
 * attaching it to an existing sibling policy or by allocating (or, when
 * frozen/resuming, restoring) a policy and initialising it via the driver.
 *
 * NOTE(review): this extract is truncated -- the trailing parameter line
 * (presumably 'bool frozen'), several statements, error labels and closing
 * braces are missing from the visible text; only comments were added here.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
	unsigned int j, cpu = dev->id;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;

	/* Offline CPUs are handled via the hotplug-online path instead. */
	if (cpu_is_offline(cpu))

	pr_debug("adding CPU %u\n", cpu);

	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);

	/* Pin the driver module for the duration of the add. */
	if (!down_read_trylock(&cpufreq_rwsem))

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			/* A sibling already owns a policy: just join it. */
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
			up_read(&cpufreq_rwsem);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Restore the saved policy when doing light-weight init */
	policy = cpufreq_policy_restore(cpu);
		policy = cpufreq_policy_alloc();

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
			pr_err("%s: ->get() failed\n", __func__);

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Re-apply the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
			 policy->governor->name, cpu);

	/* Publish the policy for all managed CPUs. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_interface(policy, dev);
		goto err_out_unregister;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	/* Error unwinding: unpublish per-CPU pointers, exit driver, free. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	cpufreq_policy_free(policy);

	up_read(&cpufreq_rwsem);
1148 * cpufreq_add_dev - add a CPU device
1150 * Adds the cpufreq interface for a CPU device.
1152 * The Oracle says: try running cpufreq registration/unregistration concurrently
1153 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1154 * mess up, but more thorough testing is needed. - Mathieu
1156 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1158 return __cpufreq_add_dev(dev, sif, false);
1161 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1162 unsigned int old_cpu, bool frozen)
1164 struct device *cpu_dev;
1167 /* first sibling now owns the new sysfs dir */
1168 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1170 /* Don't touch sysfs files during light-weight tear-down */
1174 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1175 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1177 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1179 lock_policy_rwsem_write(old_cpu);
1180 cpumask_set_cpu(old_cpu, policy->cpus);
1181 unlock_policy_rwsem_write(old_cpu);
1183 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
/*
 * __cpufreq_remove_dev_prepare - first phase of removing CPU @dev from
 * cpufreq management: stop the governor and, if this CPU owned the policy
 * kobject, nominate a sibling CPU as the new owner.
 *
 * NOTE(review): this extract is truncated -- the trailing parameter line
 * (presumably 'bool frozen'), several statements and the closing braces
 * are missing from the visible text; only comments were added here.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
	unsigned int cpu = dev->id, cpus;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_debug("%s: No cpu_data found\n", __func__);

	/* Governor must be quiesced before the CPU leaves the policy. */
	if (cpufreq_driver->target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
			pr_err("%s: Failed to stop governor\n", __func__);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor name so a later re-add can restore it. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);

	lock_policy_rwsem_read(cpu);
	cpus = cpumask_weight(policy->cpus);
	unlock_policy_rwsem_read(cpu);

	if (cpu != policy->cpu) {
		/* Non-owner CPU: only its symlink needs removing. */
			sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner leaving but siblings remain: move the kobject. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
			update_policy_cpu(policy, new_cpu);

			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				 __func__, new_cpu, cpu);
/*
 * __cpufreq_remove_dev_finish - second phase of CPU removal: drop @cpu from
 * the policy mask and, if it was the last user, tear the policy down
 * completely (governor exit, kobject release, driver ->exit, free).
 *
 * NOTE(review): this extract is truncated -- the trailing parameter line
 * (presumably 'bool frozen'), several statements, conditions and closing
 * braces are missing from the visible text; only comments were added here.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
	unsigned int cpu = dev->id, cpus;
	unsigned long flags;
	struct cpufreq_policy *policy;
	struct kobject *kobj;
	struct completion *cmp;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_debug("%s: No cpu_data found\n", __func__);

	lock_policy_rwsem_write(cpu);
	cpus = cpumask_weight(policy->cpus);

		cpumask_clear_cpu(cpu, policy->cpus);
	unlock_policy_rwsem_write(cpu);

	/* If cpu is last user of policy, free policy */
		if (cpufreq_driver->target) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
				pr_err("%s: Failed to exit governor\n",

		/* Snapshot kobject pointers under the lock before release. */
			lock_policy_rwsem_read(cpu);
			kobj = &policy->kobj;
			cmp = &policy->kobj_unregister;
			unlock_policy_rwsem_read(cpu);

			/*
			 * We need to make sure that the underlying kobj is
			 * actually not referenced anymore by anybody before we
			 * proceed with unloading.
			 */
			pr_debug("waiting for dropping of refcount\n");
			wait_for_completion(cmp);
			pr_debug("wait complete\n");

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			cpufreq_policy_free(policy);

		/* Siblings remain: restart the governor for them. */
		if (cpufreq_driver->target) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1341 * cpufreq_remove_dev - remove a CPU device
1343 * Removes the cpufreq interface for a CPU device.
/*
 * cpufreq_remove_dev - remove the cpufreq interface for a CPU device.
 * @dev: CPU device
 * @sif: subsystem interface
 *
 * Offline CPUs are skipped; otherwise removal is done in two stages,
 * prepare then finish, both called with frozen == false (a real
 * removal, not a suspend-time light-weight tear-down).
 *
 * NOTE(review): the opening brace, the early-return body and the final
 * return are missing from this extract.
 */
1345 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1347 unsigned int cpu = dev->id;
1350 if (cpu_is_offline(cpu))
1353 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1356 ret = __cpufreq_remove_dev_finish(dev, sif, false);
/*
 * handle_update - deferred work callback: re-evaluate a CPU's policy.
 *
 * Recovers the owning policy from the embedded work_struct ('update'
 * member) and calls cpufreq_update_policy() for its CPU.  Scheduled
 * e.g. from __cpufreq_get() when the driver-reported frequency
 * disagrees with policy->cur.
 */
1361 static void handle_update(struct work_struct *work)
1363 struct cpufreq_policy *policy =
1364 container_of(work, struct cpufreq_policy, update);
1365 unsigned int cpu = policy->cpu;
1366 pr_debug("handle_update for cpu %u called\n", cpu);
1367 cpufreq_update_policy(cpu);
1371 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1374 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1375 * @new_freq: CPU frequency the CPU actually runs at
1377 * We adjust to current frequency first, and need to clean up later.
1378 * So either call to cpufreq_update_policy() or schedule handle_update()).
/*
 * cpufreq_out_of_sync - reconcile the kernel's idea of a CPU's frequency
 * with what the hardware actually reports.
 * @cpu: CPU whose frequency is out of sync
 * @old_freq: frequency the kernel believed the CPU ran at (kHz)
 * @new_freq: frequency the CPU actually runs at (kHz)
 *
 * Emits a PRECHANGE/POSTCHANGE notification pair so governors and other
 * listeners adjust to the real frequency; the policy itself is cleaned
 * up later via cpufreq_update_policy()/handle_update().
 */
1380 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1381 unsigned int new_freq)
1383 struct cpufreq_policy *policy;
1384 struct cpufreq_freqs freqs;
1385 unsigned long flags;
1387 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1388 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1390 freqs.old = old_freq;
1391 freqs.new = new_freq;
/* Look up the policy under the driver read lock before notifying. */
1393 read_lock_irqsave(&cpufreq_driver_lock, flags);
1394 policy = per_cpu(cpufreq_cpu_data, cpu);
1395 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1397 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1398 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1402 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1405 * This is the last known freq, without actually getting it from the driver.
1406 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
/*
 * cpufreq_quick_get - last known frequency of @cpu in kHz, without asking
 * the driver; same value as scaling_cur_freq in sysfs.  For setpolicy
 * drivers that provide ->get(), the hardware is queried directly instead.
 * Returns 0 when no policy exists for @cpu.
 */
1408 unsigned int cpufreq_quick_get(unsigned int cpu)
1410 struct cpufreq_policy *policy;
1411 unsigned int ret_freq = 0;
/* setpolicy drivers manage frequency themselves: ask the hardware. */
1413 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1414 return cpufreq_driver->get(cpu);
/* cpufreq_cpu_get() takes a reference; drop it with cpufreq_cpu_put(). */
1416 policy = cpufreq_cpu_get(cpu);
1418 ret_freq = policy->cur;
1419 cpufreq_cpu_put(policy);
1424 EXPORT_SYMBOL(cpufreq_quick_get);
1427 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1430 * Just return the max possible frequency for a given CPU.
/*
 * cpufreq_quick_get_max - maximum frequency (policy->max, kHz) currently
 * allowed for @cpu, or 0 when no policy exists.  Reference on the policy
 * is taken and released around the read.
 */
1432 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1434 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1435 unsigned int ret_freq = 0;
1438 ret_freq = policy->max;
1439 cpufreq_cpu_put(policy);
1444 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * __cpufreq_get - read the current frequency of @cpu from the driver.
 *
 * Caller must hold the policy rwsem (see cpufreq_get()).  Returns 0 when
 * the driver has no ->get() callback.  If the reported frequency
 * disagrees with policy->cur on a driver without CPUFREQ_CONST_LOOPS,
 * the mismatch is announced via cpufreq_out_of_sync() and a deferred
 * policy update is scheduled.
 */
1446 static unsigned int __cpufreq_get(unsigned int cpu)
1448 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1449 unsigned int ret_freq = 0;
1451 if (!cpufreq_driver->get)
1454 ret_freq = cpufreq_driver->get(cpu);
1456 if (ret_freq && policy->cur &&
1457 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1458 /* verify no discrepancy between actual and
1459 saved value exists */
1460 if (unlikely(ret_freq != policy->cur)) {
1461 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
/* Fix up policy state later from process context. */
1462 schedule_work(&policy->update);
1470 * cpufreq_get - get the current CPU frequency (in kHz)
1473 * Get the CPU current (static) CPU frequency
/*
 * cpufreq_get - current frequency of @cpu in kHz, queried from the
 * driver.  Returns 0 when cpufreq is disabled, no driver is registered,
 * or cpufreq_rwsem cannot be acquired (driver being unregistered).
 * Takes the per-policy read rwsem around the actual read.
 */
1475 unsigned int cpufreq_get(unsigned int cpu)
1477 unsigned int ret_freq = 0;
1479 if (cpufreq_disabled() || !cpufreq_driver)
/* Bail out rather than block if the driver is going away. */
1482 if (!down_read_trylock(&cpufreq_rwsem))
1485 lock_policy_rwsem_read(cpu);
1487 ret_freq = __cpufreq_get(cpu);
1489 unlock_policy_rwsem_read(cpu);
1490 up_read(&cpufreq_rwsem);
1494 EXPORT_SYMBOL(cpufreq_get);
/*
 * Hook cpufreq into the cpu subsystem: add_dev/remove_dev are invoked
 * for each CPU device as it is registered or removed.
 */
1496 static struct subsys_interface cpufreq_interface = {
1498 .subsys = &cpu_subsys,
1499 .add_dev = cpufreq_add_dev,
1500 .remove_dev = cpufreq_remove_dev,
1504 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1506 * This function is only executed for the boot processor. The other CPUs
1507 * have been put offline by means of CPU hotplug.
/*
 * cpufreq_bp_suspend - syscore suspend hook for the boot CPU.
 *
 * Runs with only the boot processor online.  If the boot CPU has a
 * policy and the driver implements ->suspend(), call it and log any
 * failure.  The policy reference is taken and released here.
 */
1509 static int cpufreq_bp_suspend(void)
1513 int cpu = smp_processor_id();
1514 struct cpufreq_policy *policy;
1516 pr_debug("suspending cpu %u\n", cpu);
1518 /* If there's no policy for the boot CPU, we have nothing to do. */
1519 policy = cpufreq_cpu_get(cpu);
1523 if (cpufreq_driver->suspend) {
1524 ret = cpufreq_driver->suspend(policy);
1526 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1527 "step on CPU %u\n", policy->cpu);
1530 cpufreq_cpu_put(policy);
1535 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1537 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1538 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1539 * restored. It will verify that the current freq is in sync with
1540 * what we believe it to be. This is a bit later than when it
1541 should be, but nonetheless it's better than calling
1542 * cpufreq_driver->get() here which might re-enable interrupts...
1544 * This function is only executed for the boot CPU. The other CPUs have not
1545 * been turned on yet.
/*
 * cpufreq_bp_resume - syscore resume hook for the boot CPU.
 *
 * Calls the driver ->resume() callback if present, then schedules the
 * policy's update work so cpufreq_update_policy() re-verifies the
 * current frequency once interrupts are restored (calling ->get() here
 * directly might re-enable interrupts too early).
 */
1547 static void cpufreq_bp_resume(void)
1551 int cpu = smp_processor_id();
1552 struct cpufreq_policy *policy;
1554 pr_debug("resuming cpu %u\n", cpu);
1556 /* If there's no policy for the boot CPU, we have nothing to do. */
1557 policy = cpufreq_cpu_get(cpu);
1561 if (cpufreq_driver->resume) {
1562 ret = cpufreq_driver->resume(policy);
1564 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1565 "step on CPU %u\n", policy->cpu);
/* Defer the freq re-check to process context via handle_update(). */
1570 schedule_work(&policy->update);
1573 cpufreq_cpu_put(policy);
/* syscore hooks: run on the boot CPU around system suspend/resume. */
1576 static struct syscore_ops cpufreq_syscore_ops = {
1577 .suspend = cpufreq_bp_suspend,
1578 .resume = cpufreq_bp_resume,
1582 * cpufreq_get_current_driver - return current driver's name
1584 * Return the name string of the currently loaded cpufreq driver
/*
 * cpufreq_get_current_driver - name of the currently registered cpufreq
 * driver.  NOTE(review): the NULL-driver fallback path is not visible in
 * this extract; only the name-returning statement is shown.
 */
1587 const char *cpufreq_get_current_driver(void)
1590 return cpufreq_driver->name;
1594 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1596 /*********************************************************************
1597 * NOTIFIER LISTS INTERFACE *
1598 *********************************************************************/
1601 * cpufreq_register_notifier - register a driver with cpufreq
1602 * @nb: notifier function to register
1603 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1605 * Add a driver to one of two lists: either a list of drivers that
1606 * are notified about clock rate changes (once before and once after
1607 * the transition), or a list of drivers that are notified about
1608 * changes in cpufreq policy.
1610 * This function may sleep, and has the same return conditions as
1611 * blocking_notifier_chain_register.
/*
 * cpufreq_register_notifier - add @nb to the transition (SRCU chain) or
 * policy (blocking chain) notifier list selected by @list.  May sleep;
 * return value follows blocking_notifier_chain_register().
 *
 * NOTE(review): the default/-EINVAL switch arm is not visible in this
 * extract.
 */
1613 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1617 if (cpufreq_disabled())
/* Transition chain must have been initialized at this point. */
1620 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1623 case CPUFREQ_TRANSITION_NOTIFIER:
1624 ret = srcu_notifier_chain_register(
1625 &cpufreq_transition_notifier_list, nb);
1627 case CPUFREQ_POLICY_NOTIFIER:
1628 ret = blocking_notifier_chain_register(
1629 &cpufreq_policy_notifier_list, nb);
1637 EXPORT_SYMBOL(cpufreq_register_notifier);
1640 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1641 * @nb: notifier block to be unregistered
1642 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1644 * Remove a driver from the CPU frequency notifier list.
1646 * This function may sleep, and has the same return conditions as
1647 * blocking_notifier_chain_unregister.
/*
 * cpufreq_unregister_notifier - remove @nb from the transition or policy
 * notifier chain selected by @list; the mirror image of
 * cpufreq_register_notifier().  May sleep.
 */
1649 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1653 if (cpufreq_disabled())
1657 case CPUFREQ_TRANSITION_NOTIFIER:
1658 ret = srcu_notifier_chain_unregister(
1659 &cpufreq_transition_notifier_list, nb);
1661 case CPUFREQ_POLICY_NOTIFIER:
1662 ret = blocking_notifier_chain_unregister(
1663 &cpufreq_policy_notifier_list, nb);
1671 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1674 /*********************************************************************
1676 *********************************************************************/
/*
 * __cpufreq_driver_target - ask the driver to switch @policy to
 * @target_freq with the given @relation (lowest-at-least / highest-below).
 *
 * Clamps target_freq into [policy->min, policy->max], short-circuits when
 * the clamped target already equals policy->cur, and otherwise forwards
 * to the driver ->target() callback.  Caller must hold the policy rwsem
 * for writing (see cpufreq_driver_target()).  Returns -EINVAL when no
 * ->target() exists.
 */
1678 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1679 unsigned int target_freq,
1680 unsigned int relation)
1682 int retval = -EINVAL;
/* Remember the pre-clamp request for the debug message below. */
1683 unsigned int old_target_freq = target_freq;
1685 if (cpufreq_disabled())
1688 /* Make sure that target_freq is within supported range */
1689 if (target_freq > policy->max)
1690 target_freq = policy->max;
1691 if (target_freq < policy->min)
1692 target_freq = policy->min;
1694 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1695 policy->cpu, target_freq, relation, old_target_freq);
/* Nothing to do if we are already at the clamped target. */
1697 if (target_freq == policy->cur)
1700 if (cpufreq_driver->target)
1701 retval = cpufreq_driver->target(policy, target_freq, relation);
1705 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target():
 * takes the policy write rwsem for policy->cpu around the frequency
 * change and returns its result.
 */
1707 int cpufreq_driver_target(struct cpufreq_policy *policy,
1708 unsigned int target_freq,
1709 unsigned int relation)
1713 lock_policy_rwsem_write(policy->cpu);
1715 ret = __cpufreq_driver_target(policy, target_freq, relation);
1717 unlock_policy_rwsem_write(policy->cpu);
1721 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1724 * when "event" is CPUFREQ_GOV_LIMITS
/*
 * __cpufreq_governor - dispatch a governor @event (INIT/START/STOP/
 * LIMITS/POLICY_EXIT) to policy->governor->governor().
 *
 * Falls back to the performance governor (when built in) if the active
 * governor's max_transition_latency is exceeded by the hardware's
 * transition latency.  Pins the governor module across POLICY_INIT/EXIT,
 * tracks governor_enabled under cpufreq_governor_lock to reject
 * redundant START (already enabled) or STOP/LIMITS (already disabled)
 * events, and rolls governor_enabled back if the callback fails.
 */
1727 static int __cpufreq_governor(struct cpufreq_policy *policy,
1732 /* Only must be defined when default governor is known to have latency
1733 restrictions, like e.g. conservative or ondemand.
1734 That this is the case is already ensured in Kconfig
1736 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1737 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1739 struct cpufreq_governor *gov = NULL;
/* Governor too slow for this hardware? Switch to the fallback. */
1742 if (policy->governor->max_transition_latency &&
1743 policy->cpuinfo.transition_latency >
1744 policy->governor->max_transition_latency) {
1748 printk(KERN_WARNING "%s governor failed, too long"
1749 " transition latency of HW, fallback"
1750 " to %s governor\n",
1751 policy->governor->name,
1753 policy->governor = gov;
/* Hold a module reference for the lifetime of the governor policy. */
1757 if (event == CPUFREQ_GOV_POLICY_INIT)
1758 if (!try_module_get(policy->governor->owner))
1761 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1762 policy->cpu, event);
/* Reject events that do not match the current enabled state. */
1764 mutex_lock(&cpufreq_governor_lock);
1765 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1766 || (!policy->governor_enabled
1767 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1768 mutex_unlock(&cpufreq_governor_lock);
1772 if (event == CPUFREQ_GOV_STOP)
1773 policy->governor_enabled = false;
1774 else if (event == CPUFREQ_GOV_START)
1775 policy->governor_enabled = true;
1777 mutex_unlock(&cpufreq_governor_lock);
1779 ret = policy->governor->governor(policy, event);
/* On success, track how many policies use this governor. */
1782 if (event == CPUFREQ_GOV_POLICY_INIT)
1783 policy->governor->initialized++;
1784 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1785 policy->governor->initialized--;
1787 /* Restore original values */
1788 mutex_lock(&cpufreq_governor_lock);
1789 if (event == CPUFREQ_GOV_STOP)
1790 policy->governor_enabled = true;
1791 else if (event == CPUFREQ_GOV_START)
1792 policy->governor_enabled = false;
1793 mutex_unlock(&cpufreq_governor_lock);
/* Drop the module ref on failed INIT or successful EXIT. */
1796 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1797 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1798 module_put(policy->governor->owner);
/*
 * cpufreq_register_governor - add @governor to cpufreq_governor_list,
 * guarded by cpufreq_governor_mutex, unless a governor with the same
 * name is already registered.  Resets its 'initialized' use counter.
 */
1803 int cpufreq_register_governor(struct cpufreq_governor *governor)
1810 if (cpufreq_disabled())
1813 mutex_lock(&cpufreq_governor_mutex);
1815 governor->initialized = 0;
/* Only register if the name is not taken. */
1817 if (__find_governor(governor->name) == NULL) {
1819 list_add(&governor->governor_list, &cpufreq_governor_list);
1822 mutex_unlock(&cpufreq_governor_mutex);
1825 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * cpufreq_unregister_governor - remove @governor from the governor list.
 *
 * With CPU hotplug enabled, first scrub the saved "last governor" name of
 * every offline-but-present CPU that still references this governor, so a
 * later online does not try to restore an unregistered governor.
 */
1827 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1829 #ifdef CONFIG_HOTPLUG_CPU
1836 if (cpufreq_disabled())
1839 #ifdef CONFIG_HOTPLUG_CPU
1840 for_each_present_cpu(cpu) {
/* Online CPUs keep their live policy; only saved names need clearing. */
1841 if (cpu_online(cpu))
1843 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1844 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1848 mutex_lock(&cpufreq_governor_mutex);
1849 list_del(&governor->governor_list);
1850 mutex_unlock(&cpufreq_governor_mutex);
1853 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1856 /*********************************************************************
1857 * POLICY INTERFACE *
1858 *********************************************************************/
1861 * cpufreq_get_policy - get the current cpufreq_policy
1862 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1865 * Reads the current cpufreq policy.
/*
 * cpufreq_get_policy - copy the live policy of @cpu into caller-supplied
 * @policy storage.  Takes and drops a reference around the memcpy.
 */
1867 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1869 struct cpufreq_policy *cpu_policy;
1873 cpu_policy = cpufreq_cpu_get(cpu);
1877 memcpy(policy, cpu_policy, sizeof(*policy));
1879 cpufreq_cpu_put(cpu_policy);
1882 EXPORT_SYMBOL(cpufreq_get_policy);
1885 * policy : current policy.
1886 * new_policy: policy to be set.
/*
 * cpufreq_set_policy - apply @new_policy limits/governor to @policy.
 *
 * Sequence: sanity-check the new limits against the current ones, let
 * the driver ->verify() them, give policy notifiers two chances to
 * adjust (CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE), re-verify, announce the
 * final policy (CPUFREQ_NOTIFY), then commit min/max.  setpolicy
 * drivers get ->setpolicy(); governor-based drivers get a governor
 * switch (STOP/EXIT old, INIT/START new, with rollback to the old
 * governor on failure) and finally a GOV_LIMITS event.
 *
 * Caller must hold the policy write rwsem; it is dropped and re-taken
 * around governor POLICY_EXIT/INIT calls to avoid deadlocks with
 * governor-internal locking.
 */
1888 static int cpufreq_set_policy(struct cpufreq_policy *policy,
1889 struct cpufreq_policy *new_policy)
1891 int ret = 0, failed = 1;
1893 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1894 new_policy->min, new_policy->max);
/* cpuinfo is hardware-determined; always carry it over unchanged. */
1896 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1898 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1903 /* verify the cpu speed can be set within this limit */
1904 ret = cpufreq_driver->verify(new_policy);
1908 /* adjust if necessary - all reasons */
1909 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1910 CPUFREQ_ADJUST, new_policy);
1912 /* adjust if necessary - hardware incompatibility*/
1913 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1914 CPUFREQ_INCOMPATIBLE, new_policy);
1917 * verify the cpu speed can be set within this limit, which might be
1918 * different to the first one
1920 ret = cpufreq_driver->verify(new_policy);
1924 /* notification of the new policy */
1925 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1926 CPUFREQ_NOTIFY, new_policy);
1928 policy->min = new_policy->min;
1929 policy->max = new_policy->max;
1931 pr_debug("new min and max freqs are %u - %u kHz\n",
1932 policy->min, policy->max);
1934 if (cpufreq_driver->setpolicy) {
1935 policy->policy = new_policy->policy;
1936 pr_debug("setting range\n");
1937 ret = cpufreq_driver->setpolicy(new_policy);
1939 if (new_policy->governor != policy->governor) {
1940 /* save old, working values */
1941 struct cpufreq_governor *old_gov = policy->governor;
1943 pr_debug("governor switch\n");
1945 /* end old governor */
1946 if (policy->governor) {
1947 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
/* Drop the rwsem: POLICY_EXIT may take governor locks of its own. */
1948 unlock_policy_rwsem_write(new_policy->cpu);
1949 __cpufreq_governor(policy,
1950 CPUFREQ_GOV_POLICY_EXIT);
1951 lock_policy_rwsem_write(new_policy->cpu);
1954 /* start new governor */
1955 policy->governor = new_policy->governor;
1956 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1957 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
/* START failed: tear the new governor back down. */
1960 unlock_policy_rwsem_write(new_policy->cpu);
1961 __cpufreq_governor(policy,
1962 CPUFREQ_GOV_POLICY_EXIT);
1963 lock_policy_rwsem_write(new_policy->cpu);
1968 /* new governor failed, so re-start old one */
1969 pr_debug("starting governor %s failed\n",
1970 policy->governor->name);
1972 policy->governor = old_gov;
1973 __cpufreq_governor(policy,
1974 CPUFREQ_GOV_POLICY_INIT);
1975 __cpufreq_governor(policy,
1981 /* might be a policy change, too, so fall through */
1983 pr_debug("governor: change or update limits\n");
1984 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1992 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1993 * @cpu: CPU which shall be re-evaluated
1995 * Useful for policy notifiers which have different necessities
1996 * at different times.
/*
 * cpufreq_update_policy - re-evaluate the policy of @cpu.
 *
 * Rebuilds a candidate policy from the user_policy limits/governor,
 * refreshes 'cur' from the driver ->get() (detecting BIOS-made frequency
 * changes and fixing them up via cpufreq_out_of_sync() for ->target()
 * drivers), then re-applies it with cpufreq_set_policy().  Takes the
 * policy write rwsem around the whole operation and a policy reference
 * for its duration.
 */
1998 int cpufreq_update_policy(unsigned int cpu)
2000 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2001 struct cpufreq_policy new_policy;
2009 lock_policy_rwsem_write(cpu);
2011 pr_debug("updating policy for CPU %u\n", cpu);
/* Start from the live policy, then restore user-requested settings. */
2012 memcpy(&new_policy, policy, sizeof(*policy));
2013 new_policy.min = policy->user_policy.min;
2014 new_policy.max = policy->user_policy.max;
2015 new_policy.policy = policy->user_policy.policy;
2016 new_policy.governor = policy->user_policy.governor;
2019 * BIOS might change freq behind our back
2020 * -> ask driver for current freq and notify governors about a change
2022 if (cpufreq_driver->get) {
2023 new_policy.cur = cpufreq_driver->get(cpu);
2025 pr_debug("Driver did not initialize current freq");
2026 policy->cur = new_policy.cur;
2028 if (policy->cur != new_policy.cur && cpufreq_driver->target)
2029 cpufreq_out_of_sync(cpu, policy->cur,
2034 ret = cpufreq_set_policy(policy, &new_policy);
2036 unlock_policy_rwsem_write(cpu);
2038 cpufreq_cpu_put(policy);
/*
 * cpufreq_cpu_callback - CPU hotplug notifier.
 *
 * Translates hotplug events into cpufreq actions: online (and
 * DOWN_FAILED) re-adds the device and refreshes its policy;
 * DOWN_PREPARE runs the remove 'prepare' stage and a later event runs
 * the 'finish' stage.  The CPU_TASKS_FROZEN bit selects the
 * light-weight (suspend/resume) variant via 'frozen'.
 */
2044 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2045 unsigned long action, void *hcpu)
2047 unsigned int cpu = (unsigned long)hcpu;
2049 bool frozen = false;
2051 dev = get_cpu_device(cpu);
/* Frozen == hotplug during suspend/resume: light-weight add/remove. */
2054 if (action & CPU_TASKS_FROZEN)
2057 switch (action & ~CPU_TASKS_FROZEN) {
2059 __cpufreq_add_dev(dev, NULL, frozen);
2060 cpufreq_update_policy(cpu);
2063 case CPU_DOWN_PREPARE:
2064 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2068 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2071 case CPU_DOWN_FAILED:
2072 __cpufreq_add_dev(dev, NULL, frozen);
/* Hotplug notifier block; registered in cpufreq_register_driver(). */
2079 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2080 .notifier_call = cpufreq_cpu_callback,
2083 /*********************************************************************
2084 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2085 *********************************************************************/
2088 * cpufreq_register_driver - register a CPU Frequency driver
2089 * @driver_data: A struct cpufreq_driver containing the values
2090 * submitted by the CPU Frequency driver.
2092 * Registers a CPU Frequency driver to this core code. This code
2093 * returns zero on success, -EBUSY when another driver got here first
2094 * (and isn't unregistered in the meantime).
/*
 * cpufreq_register_driver - register @driver_data as THE cpufreq driver.
 *
 * Validates the mandatory callbacks (verify, init, and one of
 * setpolicy/target), installs the driver pointer under the write lock
 * (-EBUSY if one is already set), registers the cpu subsys interface so
 * every CPU gets ->init() called, and — unless CPUFREQ_STICKY — backs
 * everything out again if no CPU was successfully initialized.  Finally
 * hooks up the CPU hotplug notifier.
 */
2097 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2099 unsigned long flags;
2102 if (cpufreq_disabled())
/* A driver must be able to verify limits, init policies, and set freqs. */
2105 if (!driver_data || !driver_data->verify || !driver_data->init ||
2106 ((!driver_data->setpolicy) && (!driver_data->target)))
2109 pr_debug("trying to register driver %s\n", driver_data->name);
/* setpolicy drivers change freq behind our back: loops_per_jiffy const. */
2111 if (driver_data->setpolicy)
2112 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2114 write_lock_irqsave(&cpufreq_driver_lock, flags);
2115 if (cpufreq_driver) {
2116 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2119 cpufreq_driver = driver_data;
2120 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2122 ret = subsys_interface_register(&cpufreq_interface);
2124 goto err_null_driver;
2126 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2130 /* check for at least one working CPU */
2131 for (i = 0; i < nr_cpu_ids; i++)
2132 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2137 /* if all ->init() calls failed, unregister */
2139 pr_debug("no CPU initialized for driver %s\n",
2145 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2146 pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwind: undo subsys registration, then clear the driver slot. */
2150 subsys_interface_unregister(&cpufreq_interface);
2152 write_lock_irqsave(&cpufreq_driver_lock, flags);
2153 cpufreq_driver = NULL;
2154 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2160 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2162 * Unregister the current CPUFreq driver. Only call this if you have
2163 * the right to do so, i.e. if you have succeeded in initialising before!
2164 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2165 * currently not initialised.
/*
 * cpufreq_unregister_driver - unregister @driver, which must be the
 * currently registered driver.  Tears down the subsys interface and the
 * hotplug notifier, then clears cpufreq_driver while holding both
 * cpufreq_rwsem (write) and the driver lock, so in-flight readers drain
 * before the pointer goes away.
 */
2167 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2169 unsigned long flags;
2171 if (!cpufreq_driver || (driver != cpufreq_driver))
2174 pr_debug("unregistering driver %s\n", driver->name);
2176 subsys_interface_unregister(&cpufreq_interface);
2177 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2179 down_write(&cpufreq_rwsem);
2180 write_lock_irqsave(&cpufreq_driver_lock, flags);
2182 cpufreq_driver = NULL;
2184 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2185 up_write(&cpufreq_rwsem);
/*
 * cpufreq_core_init - core bring-up at core_initcall time.
 *
 * Initializes each possible CPU's policy rwsem, creates the global
 * cpufreq kobject (BUG on failure: the subsystem cannot run without
 * it), and registers the boot-CPU suspend/resume syscore ops.
 */
2191 static int __init cpufreq_core_init(void)
2195 if (cpufreq_disabled())
2198 for_each_possible_cpu(cpu)
2199 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2201 cpufreq_global_kobject = kobject_create();
2202 BUG_ON(!cpufreq_global_kobject);
2203 register_syscore_ops(&cpufreq_syscore_ops);
2207 core_initcall(cpufreq_core_init);