/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
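/*
 * Note: a cpufreq driver supplies exactly one of two scaling callbacks:
 * ->target() (older style; the driver issues its own transition
 * notifications) or ->target_index() (newer style; the core wraps the
 * driver call in PRECHANGE/POSTCHANGE notifications, see
 * __cpufreq_driver_target() below).
 */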
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during
 * critical sections.
 */
static DECLARE_RWSEM(cpufreq_rwsem);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
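/*
 * Example (illustrative only, not part of this file): a governor would
 * typically sample this pair twice and derive the load over the interval:
 *
 *	u64 wall0, wall1, idle0, idle1, load;
 *	idle0 = get_cpu_idle_time(cpu, &wall0, 0);
 *	// ... wait one sampling period ...
 *	idle1 = get_cpu_idle_time(cpu, &wall1, 0);
 *	load = 100 * ((wall1 - wall0) - (idle1 - idle0)) / (wall1 - wall0);
 */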
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
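/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * minimal ->init() built on the helper above, assuming a driver-provided
 * "foo_freq_table" and a 100 us transition latency:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 */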
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n", __func__,
		       policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
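/*
 * Usage note: every successful cpufreq_cpu_get() must be balanced by a
 * cpufreq_cpu_put(), which drops both the kobject reference and the read
 * lock on cpufreq_rwsem taken above:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (policy) {
 *		// ... use policy ...
 *		cpufreq_cpu_put(policy);
 *	}
 */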
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new,
			 (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/* Do post notifications when there are chances that transition has failed */
void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
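/*
 * Illustrative sequence (not from this file) for a driver that notifies
 * manually around a hardware transition; program_hardware() is a
 * hypothetical helper:
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	ret = program_hardware(policy, freqs.new);
 *	cpufreq_notify_post_transition(policy, &freqs, ret);
 *
 * On failure, the post-transition helper above replays the notifications
 * with old/new swapped so listeners see the frequency revert.
 */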
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n", __func__,
		       enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n", __func__,
		 enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target()) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);
		t = __find_governor(str_governor);
		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);
			if (ret == 0)
				t = __find_governor(str_governor);
		}
		if (t != NULL) {
			*governor = t;
			err = 0;
		}
		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
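/*
 * For instance, show_one(scaling_min_freq, min) below expands to:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */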
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (has_target()) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(policy->governor->name,
					&new_policy.policy, NULL);

	/* assure that the starting sequence is run in cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
		    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
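/*
 * Ordering note on the helper above: the governor is stopped before
 * policy->cpus is extended and restarted afterwards, so it never samples
 * a stale CPU mask; the sysfs link is created last, once the CPU is fully
 * managed by the policy.
 */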
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	cpufreq_frequency_table_update_policy_cpu(policy);
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		frozen = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should at least have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!frozen) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;
	}

	down_write(&policy->rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}
	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with cpufreq core. In such cases CPU
	 * might be unstable if it has to run on that frequency for a long
	 * duration of time, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
			 policy->governor->name, cpu);
	}
#endif

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!frozen) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;
err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (frozen) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);

		down_write(&policy->rwsem);
		cpumask_set_cpu(old_cpu, policy->cpus);
		up_write(&policy->rwsem);

		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	if (cpu_is_offline(cpu))
		return 0;

	ret = __cpufreq_remove_dev_prepare(dev, sif, false);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif, false);

	return ret;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
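/*
 * Example (illustrative only): because this returns 0 when no policy
 * exists for the CPU, callers usually supply their own fallback:
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);
 *	if (!khz)
 *		khz = arch_default_khz;		// hypothetical fallback
 */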
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
		   saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(cpu);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend step on CPU %u\n",
			       policy->cpu);
	}

	cpufreq_cpu_put(policy);

	return ret;
}
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume step on CPU %u\n",
			       policy->cpu);
			goto fail;
		}
	}

	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
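/*
 * Illustrative registration (hypothetical client, not part of this file):
 *
 *	static int foo_notify(struct notifier_block *nb, unsigned long val,
 *			      void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			;	// react to freqs->new here
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_nb = { .notifier_call = foo_notify };
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */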
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
				 __func__, policy->cpu, freqs.old, freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
			       __func__, retval);

		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
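/*
 * Callers that already hold policy->rwsem (e.g. governors invoked via
 * __cpufreq_governor() below) must use __cpufreq_driver_target() directly;
 * cpufreq_driver_target() is the locked wrapper for everyone else.
 */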
/*
 * Policy limits are (re-)evaluated by the governor when "event" is
 * CPUFREQ_GOV_LIMITS.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long transition latency of HW, fallback to %s governor\n",
			       policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
	    ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				up_write(&policy->rwsem);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				down_write(&policy->rwsem);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					up_write(&policy->rwsem);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					down_write(&policy->rwsem);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
					 policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(cpu, policy->cur,
								new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	bool frozen = false;

	dev = get_cpu_device(cpu);
	if (dev) {
		if (action & CPU_TASKS_FROZEN)
			frozen = true;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL, frozen);
			cpufreq_update_policy(cpu);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL, frozen);
			break;

		case CPU_DOWN_FAILED:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n", __func__,
		       state ? "enable" : "disable");
	}

	return ret;
}
int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
				 driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
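/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * table-based driver would typically register itself like this, where
 * foo_cpufreq_init and foo_target_index are driver-provided callbacks:
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_target_index,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_driver);
 */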
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);