/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
39 static struct cpufreq_driver *cpufreq_driver;
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
42 static DEFINE_RWLOCK(cpufreq_driver_lock);
43 DEFINE_MUTEX(cpufreq_governor_lock);
44 static LIST_HEAD(cpufreq_policy_list);
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
49 /* Flag to suspend/resume CPUFreq governors */
50 static bool cpufreq_suspended;
52 static inline bool has_target(void)
54 return cpufreq_driver->target_index || cpufreq_driver->target;
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
70 * Two notifier lists: the "policy" list is involved in the
71 * validation process for a new CPU frequency policy; the
72 * "transition" list for kernel code that needs to handle
73 * changes to devices when the CPU clock speed changes.
74 * The mutex locks both lists.
76 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
77 static struct srcu_notifier_head cpufreq_transition_notifier_list;
79 static bool init_cpufreq_transition_notifier_list_called;
80 static int __init init_cpufreq_transition_notifier_list(void)
82 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
83 init_cpufreq_transition_notifier_list_called = true;
86 pure_initcall(init_cpufreq_transition_notifier_list);
88 static int off __read_mostly;
89 static int cpufreq_disabled(void)
93 void disable_cpufreq(void)
97 static LIST_HEAD(cpufreq_governor_list);
98 static DEFINE_MUTEX(cpufreq_governor_mutex);
100 bool have_governor_per_policy(void)
102 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
104 EXPORT_SYMBOL_GPL(have_governor_per_policy);
106 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
108 if (have_governor_per_policy())
109 return &policy->kobj;
111 return cpufreq_global_kobject;
113 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
115 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
121 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
123 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
130 idle_time = cur_wall_time - busy_time;
132 *wall = cputime_to_usecs(cur_wall_time);
134 return cputime_to_usecs(idle_time);
137 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
139 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
141 if (idle_time == -1ULL)
142 return get_cpu_idle_time_jiffy(cpu, wall);
144 idle_time += get_cpu_iowait_time_us(cpu, wall);
148 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
151 * This is a generic cpufreq init() routine which can be used by cpufreq
152 * drivers of SMP systems. It will do following:
153 * - validate & show freq table passed
154 * - set policies transition latency
155 * - policy->cpus with all possible CPUs
157 int cpufreq_generic_init(struct cpufreq_policy *policy,
158 struct cpufreq_frequency_table *table,
159 unsigned int transition_latency)
163 ret = cpufreq_table_validate_and_show(policy, table);
165 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
169 policy->cpuinfo.transition_latency = transition_latency;
172 * The driver only supports the SMP configuartion where all processors
173 * share the clock and voltage and clock.
175 cpumask_setall(policy->cpus);
179 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
181 unsigned int cpufreq_generic_get(unsigned int cpu)
183 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
185 if (!policy || IS_ERR(policy->clk)) {
186 pr_err("%s: No %s associated to cpu: %d\n",
187 __func__, policy ? "clk" : "policy", cpu);
191 return clk_get_rate(policy->clk) / 1000;
193 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
195 /* Only for cpufreq core internal use */
196 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
198 return per_cpu(cpufreq_cpu_data, cpu);
201 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
203 struct cpufreq_policy *policy = NULL;
206 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
209 if (!down_read_trylock(&cpufreq_rwsem))
212 /* get the cpufreq driver */
213 read_lock_irqsave(&cpufreq_driver_lock, flags);
215 if (cpufreq_driver) {
217 policy = per_cpu(cpufreq_cpu_data, cpu);
219 kobject_get(&policy->kobj);
222 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
225 up_read(&cpufreq_rwsem);
229 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231 void cpufreq_cpu_put(struct cpufreq_policy *policy)
233 if (cpufreq_disabled())
236 kobject_put(&policy->kobj);
237 up_read(&cpufreq_rwsem);
239 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
241 /*********************************************************************
242 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
243 *********************************************************************/
246 * adjust_jiffies - adjust the system "loops_per_jiffy"
248 * This function alters the system "loops_per_jiffy" for the clock
249 * speed change. Note that loops_per_jiffy cannot be updated on SMP
250 * systems as each CPU might be scaled differently. So, use the arch
251 * per-CPU loops_per_jiffy value wherever possible.
253 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
256 static unsigned long l_p_j_ref;
257 static unsigned int l_p_j_ref_freq;
259 if (ci->flags & CPUFREQ_CONST_LOOPS)
262 if (!l_p_j_ref_freq) {
263 l_p_j_ref = loops_per_jiffy;
264 l_p_j_ref_freq = ci->old;
265 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
266 l_p_j_ref, l_p_j_ref_freq);
268 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
269 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
271 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
272 loops_per_jiffy, ci->new);
277 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
278 struct cpufreq_freqs *freqs, unsigned int state)
280 BUG_ON(irqs_disabled());
282 if (cpufreq_disabled())
285 freqs->flags = cpufreq_driver->flags;
286 pr_debug("notification %u of frequency transition to %u kHz\n",
291 case CPUFREQ_PRECHANGE:
292 /* detect if the driver reported a value as "old frequency"
293 * which is not equal to what the cpufreq core thinks is
296 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
297 if ((policy) && (policy->cpu == freqs->cpu) &&
298 (policy->cur) && (policy->cur != freqs->old)) {
299 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
300 freqs->old, policy->cur);
301 freqs->old = policy->cur;
304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
305 CPUFREQ_PRECHANGE, freqs);
306 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
309 case CPUFREQ_POSTCHANGE:
310 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
311 pr_debug("FREQ: %lu - CPU: %lu\n",
312 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
313 trace_cpu_frequency(freqs->new, freqs->cpu);
314 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
315 CPUFREQ_POSTCHANGE, freqs);
316 if (likely(policy) && likely(policy->cpu == freqs->cpu))
317 policy->cur = freqs->new;
323 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
324 * on frequency transition.
326 * This function calls the transition notifiers and the "adjust_jiffies"
327 * function. It is called twice on all CPU frequency changes that have
330 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
331 struct cpufreq_freqs *freqs, unsigned int state)
333 for_each_cpu(freqs->cpu, policy->cpus)
334 __cpufreq_notify_transition(policy, freqs, state);
337 /* Do post notifications when there are chances that transition has failed */
338 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
339 struct cpufreq_freqs *freqs, int transition_failed)
341 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
342 if (!transition_failed)
345 swap(freqs->old, freqs->new);
346 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
347 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
350 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
351 struct cpufreq_freqs *freqs)
355 * Catch double invocations of _begin() which lead to self-deadlock.
356 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
357 * doesn't invoke _begin() on their behalf, and hence the chances of
358 * double invocations are very low. Moreover, there are scenarios
359 * where these checks can emit false-positive warnings in these
360 * drivers; so we avoid that by skipping them altogether.
362 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
363 && current == policy->transition_task);
366 wait_event(policy->transition_wait, !policy->transition_ongoing);
368 spin_lock(&policy->transition_lock);
370 if (unlikely(policy->transition_ongoing)) {
371 spin_unlock(&policy->transition_lock);
375 policy->transition_ongoing = true;
376 policy->transition_task = current;
378 spin_unlock(&policy->transition_lock);
380 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
382 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
384 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
385 struct cpufreq_freqs *freqs, int transition_failed)
387 if (unlikely(WARN_ON(!policy->transition_ongoing)))
390 cpufreq_notify_post_transition(policy, freqs, transition_failed);
392 policy->transition_ongoing = false;
393 policy->transition_task = NULL;
395 wake_up(&policy->transition_wait);
397 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
400 /*********************************************************************
402 *********************************************************************/
403 static ssize_t show_boost(struct kobject *kobj,
404 struct attribute *attr, char *buf)
406 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
409 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
410 const char *buf, size_t count)
414 ret = sscanf(buf, "%d", &enable);
415 if (ret != 1 || enable < 0 || enable > 1)
418 if (cpufreq_boost_trigger_state(enable)) {
419 pr_err("%s: Cannot %s BOOST!\n",
420 __func__, enable ? "enable" : "disable");
424 pr_debug("%s: cpufreq BOOST %s\n",
425 __func__, enable ? "enabled" : "disabled");
429 define_one_global_rw(boost);
431 static struct cpufreq_governor *find_governor(const char *str_governor)
433 struct cpufreq_governor *t;
435 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
436 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
443 * cpufreq_parse_governor - parse a governor string
445 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
446 struct cpufreq_governor **governor)
453 if (cpufreq_driver->setpolicy) {
454 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
455 *policy = CPUFREQ_POLICY_PERFORMANCE;
457 } else if (!strncasecmp(str_governor, "powersave",
459 *policy = CPUFREQ_POLICY_POWERSAVE;
463 struct cpufreq_governor *t;
465 mutex_lock(&cpufreq_governor_mutex);
467 t = find_governor(str_governor);
472 mutex_unlock(&cpufreq_governor_mutex);
473 ret = request_module("cpufreq_%s", str_governor);
474 mutex_lock(&cpufreq_governor_mutex);
477 t = find_governor(str_governor);
485 mutex_unlock(&cpufreq_governor_mutex);
492 * cpufreq_per_cpu_attr_read() / show_##file_name() -
493 * print out cpufreq information
495 * Write out information from cpufreq_driver->policy[cpu]; object must be
499 #define show_one(file_name, object) \
500 static ssize_t show_##file_name \
501 (struct cpufreq_policy *policy, char *buf) \
503 return sprintf(buf, "%u\n", policy->object); \
506 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
507 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
508 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
509 show_one(scaling_min_freq, min);
510 show_one(scaling_max_freq, max);
512 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
516 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
517 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
519 ret = sprintf(buf, "%u\n", policy->cur);
523 static int cpufreq_set_policy(struct cpufreq_policy *policy,
524 struct cpufreq_policy *new_policy);
527 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
529 #define store_one(file_name, object) \
530 static ssize_t store_##file_name \
531 (struct cpufreq_policy *policy, const char *buf, size_t count) \
534 struct cpufreq_policy new_policy; \
536 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
540 ret = sscanf(buf, "%u", &new_policy.object); \
544 temp = new_policy.object; \
545 ret = cpufreq_set_policy(policy, &new_policy); \
547 policy->user_policy.object = temp; \
549 return ret ? ret : count; \
552 store_one(scaling_min_freq, min);
553 store_one(scaling_max_freq, max);
556 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
558 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
561 unsigned int cur_freq = __cpufreq_get(policy);
563 return sprintf(buf, "<unknown>");
564 return sprintf(buf, "%u\n", cur_freq);
568 * show_scaling_governor - show the current policy for the specified CPU
570 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
572 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
573 return sprintf(buf, "powersave\n");
574 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
575 return sprintf(buf, "performance\n");
576 else if (policy->governor)
577 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
578 policy->governor->name);
583 * store_scaling_governor - store policy for the specified CPU
585 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
586 const char *buf, size_t count)
589 char str_governor[16];
590 struct cpufreq_policy new_policy;
592 ret = cpufreq_get_policy(&new_policy, policy->cpu);
596 ret = sscanf(buf, "%15s", str_governor);
600 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
601 &new_policy.governor))
604 ret = cpufreq_set_policy(policy, &new_policy);
606 policy->user_policy.policy = policy->policy;
607 policy->user_policy.governor = policy->governor;
616 * show_scaling_driver - show the cpufreq driver currently loaded
618 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
620 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
624 * show_scaling_available_governors - show the available CPUfreq governors
626 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
630 struct cpufreq_governor *t;
633 i += sprintf(buf, "performance powersave");
637 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
638 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
639 - (CPUFREQ_NAME_LEN + 2)))
641 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
644 i += sprintf(&buf[i], "\n");
648 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
653 for_each_cpu(cpu, mask) {
655 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
656 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
657 if (i >= (PAGE_SIZE - 5))
660 i += sprintf(&buf[i], "\n");
663 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
666 * show_related_cpus - show the CPUs affected by each transition even if
667 * hw coordination is in use
669 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
671 return cpufreq_show_cpus(policy->related_cpus, buf);
675 * show_affected_cpus - show the CPUs affected by each transition
677 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
679 return cpufreq_show_cpus(policy->cpus, buf);
682 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
683 const char *buf, size_t count)
685 unsigned int freq = 0;
688 if (!policy->governor || !policy->governor->store_setspeed)
691 ret = sscanf(buf, "%u", &freq);
695 policy->governor->store_setspeed(policy, freq);
700 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
702 if (!policy->governor || !policy->governor->show_setspeed)
703 return sprintf(buf, "<unsupported>\n");
705 return policy->governor->show_setspeed(policy, buf);
709 * show_bios_limit - show the current cpufreq HW/BIOS limitation
711 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
715 if (cpufreq_driver->bios_limit) {
716 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
718 return sprintf(buf, "%u\n", limit);
720 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
723 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
724 cpufreq_freq_attr_ro(cpuinfo_min_freq);
725 cpufreq_freq_attr_ro(cpuinfo_max_freq);
726 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
727 cpufreq_freq_attr_ro(scaling_available_governors);
728 cpufreq_freq_attr_ro(scaling_driver);
729 cpufreq_freq_attr_ro(scaling_cur_freq);
730 cpufreq_freq_attr_ro(bios_limit);
731 cpufreq_freq_attr_ro(related_cpus);
732 cpufreq_freq_attr_ro(affected_cpus);
733 cpufreq_freq_attr_rw(scaling_min_freq);
734 cpufreq_freq_attr_rw(scaling_max_freq);
735 cpufreq_freq_attr_rw(scaling_governor);
736 cpufreq_freq_attr_rw(scaling_setspeed);
738 static struct attribute *default_attrs[] = {
739 &cpuinfo_min_freq.attr,
740 &cpuinfo_max_freq.attr,
741 &cpuinfo_transition_latency.attr,
742 &scaling_min_freq.attr,
743 &scaling_max_freq.attr,
746 &scaling_governor.attr,
747 &scaling_driver.attr,
748 &scaling_available_governors.attr,
749 &scaling_setspeed.attr,
753 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
754 #define to_attr(a) container_of(a, struct freq_attr, attr)
756 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
758 struct cpufreq_policy *policy = to_policy(kobj);
759 struct freq_attr *fattr = to_attr(attr);
762 if (!down_read_trylock(&cpufreq_rwsem))
765 down_read(&policy->rwsem);
768 ret = fattr->show(policy, buf);
772 up_read(&policy->rwsem);
773 up_read(&cpufreq_rwsem);
778 static ssize_t store(struct kobject *kobj, struct attribute *attr,
779 const char *buf, size_t count)
781 struct cpufreq_policy *policy = to_policy(kobj);
782 struct freq_attr *fattr = to_attr(attr);
783 ssize_t ret = -EINVAL;
787 if (!cpu_online(policy->cpu))
790 if (!down_read_trylock(&cpufreq_rwsem))
793 down_write(&policy->rwsem);
796 ret = fattr->store(policy, buf, count);
800 up_write(&policy->rwsem);
802 up_read(&cpufreq_rwsem);
809 static void cpufreq_sysfs_release(struct kobject *kobj)
811 struct cpufreq_policy *policy = to_policy(kobj);
812 pr_debug("last reference is dropped\n");
813 complete(&policy->kobj_unregister);
816 static const struct sysfs_ops sysfs_ops = {
821 static struct kobj_type ktype_cpufreq = {
822 .sysfs_ops = &sysfs_ops,
823 .default_attrs = default_attrs,
824 .release = cpufreq_sysfs_release,
827 struct kobject *cpufreq_global_kobject;
828 EXPORT_SYMBOL(cpufreq_global_kobject);
830 static int cpufreq_global_kobject_usage;
832 int cpufreq_get_global_kobject(void)
834 if (!cpufreq_global_kobject_usage++)
835 return kobject_add(cpufreq_global_kobject,
836 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
840 EXPORT_SYMBOL(cpufreq_get_global_kobject);
842 void cpufreq_put_global_kobject(void)
844 if (!--cpufreq_global_kobject_usage)
845 kobject_del(cpufreq_global_kobject);
847 EXPORT_SYMBOL(cpufreq_put_global_kobject);
849 int cpufreq_sysfs_create_file(const struct attribute *attr)
851 int ret = cpufreq_get_global_kobject();
854 ret = sysfs_create_file(cpufreq_global_kobject, attr);
856 cpufreq_put_global_kobject();
861 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
863 void cpufreq_sysfs_remove_file(const struct attribute *attr)
865 sysfs_remove_file(cpufreq_global_kobject, attr);
866 cpufreq_put_global_kobject();
868 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
870 /* symlink affected CPUs */
871 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
876 for_each_cpu(j, policy->cpus) {
877 struct device *cpu_dev;
879 if (j == policy->cpu)
882 pr_debug("Adding link for CPU: %u\n", j);
883 cpu_dev = get_cpu_device(j);
884 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
892 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
895 struct freq_attr **drv_attr;
898 /* set up files for this cpu device */
899 drv_attr = cpufreq_driver->attr;
900 while (drv_attr && *drv_attr) {
901 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
906 if (cpufreq_driver->get) {
907 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
912 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
916 if (cpufreq_driver->bios_limit) {
917 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
922 return cpufreq_add_dev_symlink(policy);
925 static void cpufreq_init_policy(struct cpufreq_policy *policy)
927 struct cpufreq_governor *gov = NULL;
928 struct cpufreq_policy new_policy;
931 memcpy(&new_policy, policy, sizeof(*policy));
933 /* Update governor of new_policy to the governor used before hotplug */
934 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
936 pr_debug("Restoring governor %s for cpu %d\n",
937 policy->governor->name, policy->cpu);
939 gov = CPUFREQ_DEFAULT_GOVERNOR;
941 new_policy.governor = gov;
943 /* Use the default policy if its valid. */
944 if (cpufreq_driver->setpolicy)
945 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
947 /* set default policy */
948 ret = cpufreq_set_policy(policy, &new_policy);
950 pr_debug("setting policy failed\n");
951 if (cpufreq_driver->exit)
952 cpufreq_driver->exit(policy);
956 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
957 unsigned int cpu, struct device *dev)
963 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
965 pr_err("%s: Failed to stop governor\n", __func__);
970 down_write(&policy->rwsem);
972 write_lock_irqsave(&cpufreq_driver_lock, flags);
974 cpumask_set_cpu(cpu, policy->cpus);
975 per_cpu(cpufreq_cpu_data, cpu) = policy;
976 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
978 up_write(&policy->rwsem);
981 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
983 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
986 pr_err("%s: Failed to start governor\n", __func__);
991 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
994 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
996 struct cpufreq_policy *policy;
999 read_lock_irqsave(&cpufreq_driver_lock, flags);
1001 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1003 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1006 policy->governor = NULL;
1011 static struct cpufreq_policy *cpufreq_policy_alloc(void)
1013 struct cpufreq_policy *policy;
1015 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1019 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1020 goto err_free_policy;
1022 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1023 goto err_free_cpumask;
1025 INIT_LIST_HEAD(&policy->policy_list);
1026 init_rwsem(&policy->rwsem);
1027 spin_lock_init(&policy->transition_lock);
1028 init_waitqueue_head(&policy->transition_wait);
1029 init_completion(&policy->kobj_unregister);
1030 INIT_WORK(&policy->update, handle_update);
1035 free_cpumask_var(policy->cpus);
1042 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1044 struct kobject *kobj;
1045 struct completion *cmp;
1047 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1048 CPUFREQ_REMOVE_POLICY, policy);
1050 down_read(&policy->rwsem);
1051 kobj = &policy->kobj;
1052 cmp = &policy->kobj_unregister;
1053 up_read(&policy->rwsem);
1057 * We need to make sure that the underlying kobj is
1058 * actually not referenced anymore by anybody before we
1059 * proceed with unloading.
1061 pr_debug("waiting for dropping of refcount\n");
1062 wait_for_completion(cmp);
1063 pr_debug("wait complete\n");
1066 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1068 free_cpumask_var(policy->related_cpus);
1069 free_cpumask_var(policy->cpus);
1073 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1074 struct device *cpu_dev)
1078 if (WARN_ON(cpu == policy->cpu))
1081 /* Move kobject to the new policy->cpu */
1082 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1084 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1088 down_write(&policy->rwsem);
1090 up_write(&policy->rwsem);
1095 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1097 unsigned int j, cpu = dev->id;
1099 struct cpufreq_policy *policy;
1100 unsigned long flags;
1101 bool recover_policy = cpufreq_suspended;
1103 if (cpu_is_offline(cpu))
1106 pr_debug("adding CPU %u\n", cpu);
1108 /* check whether a different CPU already registered this
1109 * CPU because it is in the same boat. */
1110 policy = cpufreq_cpu_get_raw(cpu);
1111 if (unlikely(policy))
1114 if (!down_read_trylock(&cpufreq_rwsem))
1117 /* Check if this cpu was hot-unplugged earlier and has siblings */
1118 read_lock_irqsave(&cpufreq_driver_lock, flags);
1119 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1120 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
1121 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1122 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1123 up_read(&cpufreq_rwsem);
1127 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1130 * Restore the saved policy when doing light-weight init and fall back
1131 * to the full init if that fails.
1133 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1135 recover_policy = false;
1136 policy = cpufreq_policy_alloc();
1142 * In the resume path, since we restore a saved policy, the assignment
1143 * to policy->cpu is like an update of the existing policy, rather than
1144 * the creation of a brand new one. So we need to perform this update
1145 * by invoking update_policy_cpu().
1147 if (recover_policy && cpu != policy->cpu)
1148 WARN_ON(update_policy_cpu(policy, cpu, dev));
1152 cpumask_copy(policy->cpus, cpumask_of(cpu));
1154 /* call driver. From then on the cpufreq must be able
1155 * to accept all calls to ->verify and ->setpolicy for this CPU
1157 ret = cpufreq_driver->init(policy);
1159 pr_debug("initialization failed\n");
1160 goto err_set_policy_cpu;
1163 down_write(&policy->rwsem);
1165 /* related cpus should atleast have policy->cpus */
1166 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1169 * affected cpus must always be the one, which are online. We aren't
1170 * managing offline cpus here.
1172 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1174 if (!recover_policy) {
1175 policy->user_policy.min = policy->min;
1176 policy->user_policy.max = policy->max;
1178 /* prepare interface data */
1179 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1180 &dev->kobj, "cpufreq");
1182 pr_err("%s: failed to init policy->kobj: %d\n",
1184 goto err_init_policy_kobj;
1188 write_lock_irqsave(&cpufreq_driver_lock, flags);
1189 for_each_cpu(j, policy->cpus)
1190 per_cpu(cpufreq_cpu_data, j) = policy;
1191 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1193 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1194 policy->cur = cpufreq_driver->get(policy->cpu);
1196 pr_err("%s: ->get() failed\n", __func__);
1202 * Sometimes boot loaders set CPU frequency to a value outside of
1203 * frequency table present with cpufreq core. In such cases CPU might be
1204 * unstable if it has to run on that frequency for long duration of time
1205 * and so its better to set it to a frequency which is specified in
1206 * freq-table. This also makes cpufreq stats inconsistent as
1207 * cpufreq-stats would fail to register because current frequency of CPU
1208 * isn't found in freq-table.
1210 * Because we don't want this change to effect boot process badly, we go
1211 * for the next freq which is >= policy->cur ('cur' must be set by now,
1212 * otherwise we will end up setting freq to lowest of the table as 'cur'
1213 * is initialized to zero).
1215 * We are passing target-freq as "policy->cur - 1" otherwise
1216 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1217 * equal to target-freq.
1219 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1221 /* Are we running at unknown frequency ? */
1222 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1223 if (ret == -EINVAL) {
1224 /* Warn user and fix it */
1225 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1226 __func__, policy->cpu, policy->cur);
1227 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1228 CPUFREQ_RELATION_L);
1231 * Reaching here after boot in a few seconds may not
1232 * mean that system will remain stable at "unknown"
1233 * frequency for longer duration. Hence, a BUG_ON().
1236 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1237 __func__, policy->cpu, policy->cur);
1241 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1242 CPUFREQ_START, policy);
1244 if (!recover_policy) {
1245 ret = cpufreq_add_dev_interface(policy, dev);
1247 goto err_out_unregister;
1248 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1249 CPUFREQ_CREATE_POLICY, policy);
1252 write_lock_irqsave(&cpufreq_driver_lock, flags);
1253 list_add(&policy->policy_list, &cpufreq_policy_list);
1254 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1256 cpufreq_init_policy(policy);
1258 if (!recover_policy) {
1259 policy->user_policy.policy = policy->policy;
1260 policy->user_policy.governor = policy->governor;
1262 up_write(&policy->rwsem);
1264 kobject_uevent(&policy->kobj, KOBJ_ADD);
1266 up_read(&cpufreq_rwsem);
1268 /* Callback for handling stuff after policy is ready */
1269 if (cpufreq_driver->ready)
1270 cpufreq_driver->ready(policy);
1272 pr_debug("initialization complete\n");
1278 write_lock_irqsave(&cpufreq_driver_lock, flags);
1279 for_each_cpu(j, policy->cpus)
1280 per_cpu(cpufreq_cpu_data, j) = NULL;
1281 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1283 if (!recover_policy) {
1284 kobject_put(&policy->kobj);
1285 wait_for_completion(&policy->kobj_unregister);
1287 err_init_policy_kobj:
1288 up_write(&policy->rwsem);
1290 if (cpufreq_driver->exit)
1291 cpufreq_driver->exit(policy);
1293 if (recover_policy) {
1294 /* Do not leave stale fallback data behind. */
1295 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1296 cpufreq_policy_put_kobj(policy);
1298 cpufreq_policy_free(policy);
1301 up_read(&cpufreq_rwsem);
/**
 * cpufreq_add_dev - add a CPU device
 * @dev: CPU device being added
 * @sif: subsystem interface (forwarded, unused here)
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* Thin wrapper: all real work happens in __cpufreq_add_dev(). */
	return __cpufreq_add_dev(dev, sif);
}
1320 static int __cpufreq_remove_dev_prepare(struct device *dev,
1321 struct subsys_interface *sif)
1323 unsigned int cpu = dev->id, cpus;
1325 unsigned long flags;
1326 struct cpufreq_policy *policy;
1328 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1330 write_lock_irqsave(&cpufreq_driver_lock, flags);
1332 policy = per_cpu(cpufreq_cpu_data, cpu);
1334 /* Save the policy somewhere when doing a light-weight tear-down */
1335 if (cpufreq_suspended)
1336 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1338 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1341 pr_debug("%s: No cpu_data found\n", __func__);
1346 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1348 pr_err("%s: Failed to stop governor\n", __func__);
1352 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1353 policy->governor->name, CPUFREQ_NAME_LEN);
1356 down_read(&policy->rwsem);
1357 cpus = cpumask_weight(policy->cpus);
1358 up_read(&policy->rwsem);
1360 if (cpu != policy->cpu) {
1361 sysfs_remove_link(&dev->kobj, "cpufreq");
1362 } else if (cpus > 1) {
1363 /* Nominate new CPU */
1364 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1365 struct device *cpu_dev = get_cpu_device(new_cpu);
1367 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1368 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1370 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1372 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1373 __func__, cpu_dev->id);
1377 if (!cpufreq_suspended)
1378 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1379 __func__, new_cpu, cpu);
1380 } else if (cpufreq_driver->stop_cpu) {
1381 cpufreq_driver->stop_cpu(policy);
1387 static int __cpufreq_remove_dev_finish(struct device *dev,
1388 struct subsys_interface *sif)
1390 unsigned int cpu = dev->id, cpus;
1392 unsigned long flags;
1393 struct cpufreq_policy *policy;
1395 write_lock_irqsave(&cpufreq_driver_lock, flags);
1396 policy = per_cpu(cpufreq_cpu_data, cpu);
1397 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1398 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1401 pr_debug("%s: No cpu_data found\n", __func__);
1405 down_write(&policy->rwsem);
1406 cpus = cpumask_weight(policy->cpus);
1409 cpumask_clear_cpu(cpu, policy->cpus);
1410 up_write(&policy->rwsem);
1412 /* If cpu is last user of policy, free policy */
1415 ret = __cpufreq_governor(policy,
1416 CPUFREQ_GOV_POLICY_EXIT);
1418 pr_err("%s: Failed to exit governor\n",
1424 if (!cpufreq_suspended)
1425 cpufreq_policy_put_kobj(policy);
1428 * Perform the ->exit() even during light-weight tear-down,
1429 * since this is a core component, and is essential for the
1430 * subsequent light-weight ->init() to succeed.
1432 if (cpufreq_driver->exit)
1433 cpufreq_driver->exit(policy);
1435 /* Remove policy from list of active policies */
1436 write_lock_irqsave(&cpufreq_driver_lock, flags);
1437 list_del(&policy->policy_list);
1438 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1440 if (!cpufreq_suspended)
1441 cpufreq_policy_free(policy);
1442 } else if (has_target()) {
1443 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1445 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1448 pr_err("%s: Failed to start governor\n", __func__);
1457 * cpufreq_remove_dev - remove a CPU device
1459 * Removes the cpufreq interface for a CPU device.
1461 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1463 unsigned int cpu = dev->id;
1466 if (cpu_is_offline(cpu))
1469 ret = __cpufreq_remove_dev_prepare(dev, sif);
1472 ret = __cpufreq_remove_dev_finish(dev, sif);
1477 static void handle_update(struct work_struct *work)
1479 struct cpufreq_policy *policy =
1480 container_of(work, struct cpufreq_policy, update);
1481 unsigned int cpu = policy->cpu;
1482 pr_debug("handle_update for cpu %u called\n", cpu);
1483 cpufreq_update_policy(cpu);
1487 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1489 * @policy: policy managing CPUs
1490 * @new_freq: CPU frequency the CPU actually runs at
1492 * We adjust to current frequency first, and need to clean up later.
1493 * So either call to cpufreq_update_policy() or schedule handle_update()).
1495 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1496 unsigned int new_freq)
1498 struct cpufreq_freqs freqs;
1500 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1501 policy->cur, new_freq);
1503 freqs.old = policy->cur;
1504 freqs.new = new_freq;
1506 cpufreq_freq_transition_begin(policy, &freqs);
1507 cpufreq_freq_transition_end(policy, &freqs, 0);
1511 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1514 * This is the last known freq, without actually getting it from the driver.
1515 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1517 unsigned int cpufreq_quick_get(unsigned int cpu)
1519 struct cpufreq_policy *policy;
1520 unsigned int ret_freq = 0;
1522 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1523 return cpufreq_driver->get(cpu);
1525 policy = cpufreq_cpu_get(cpu);
1527 ret_freq = policy->cur;
1528 cpufreq_cpu_put(policy);
1533 EXPORT_SYMBOL(cpufreq_quick_get);
1536 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1539 * Just return the max possible frequency for a given CPU.
1541 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1543 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1544 unsigned int ret_freq = 0;
1547 ret_freq = policy->max;
1548 cpufreq_cpu_put(policy);
1553 EXPORT_SYMBOL(cpufreq_quick_get_max);
1555 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1557 unsigned int ret_freq = 0;
1559 if (!cpufreq_driver->get)
1562 ret_freq = cpufreq_driver->get(policy->cpu);
1564 if (ret_freq && policy->cur &&
1565 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1566 /* verify no discrepancy between actual and
1567 saved value exists */
1568 if (unlikely(ret_freq != policy->cur)) {
1569 cpufreq_out_of_sync(policy, ret_freq);
1570 schedule_work(&policy->update);
1578 * cpufreq_get - get the current CPU frequency (in kHz)
1581 * Get the CPU current (static) CPU frequency
1583 unsigned int cpufreq_get(unsigned int cpu)
1585 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1586 unsigned int ret_freq = 0;
1589 down_read(&policy->rwsem);
1590 ret_freq = __cpufreq_get(policy);
1591 up_read(&policy->rwsem);
1593 cpufreq_cpu_put(policy);
1598 EXPORT_SYMBOL(cpufreq_get);
1600 static struct subsys_interface cpufreq_interface = {
1602 .subsys = &cpu_subsys,
1603 .add_dev = cpufreq_add_dev,
1604 .remove_dev = cpufreq_remove_dev,
1608 * In case platform wants some specific frequency to be configured
1611 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1615 if (!policy->suspend_freq) {
1616 pr_err("%s: suspend_freq can't be zero\n", __func__);
1620 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1621 policy->suspend_freq);
1623 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1624 CPUFREQ_RELATION_H);
1626 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1627 __func__, policy->suspend_freq, ret);
1631 EXPORT_SYMBOL(cpufreq_generic_suspend);
1634 * cpufreq_suspend() - Suspend CPUFreq governors
1636 * Called during system wide Suspend/Hibernate cycles for suspending governors
1637 * as some platforms can't change frequency after this point in suspend cycle.
1638 * Because some of the devices (like: i2c, regulators, etc) they use for
1639 * changing frequency are suspended quickly after this point.
1641 void cpufreq_suspend(void)
1643 struct cpufreq_policy *policy;
1645 if (!cpufreq_driver)
1651 pr_debug("%s: Suspending Governors\n", __func__);
1653 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1654 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1655 pr_err("%s: Failed to stop governor for policy: %p\n",
1657 else if (cpufreq_driver->suspend
1658 && cpufreq_driver->suspend(policy))
1659 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1664 cpufreq_suspended = true;
1668 * cpufreq_resume() - Resume CPUFreq governors
1670 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1671 * are suspended with cpufreq_suspend().
1673 void cpufreq_resume(void)
1675 struct cpufreq_policy *policy;
1677 if (!cpufreq_driver)
1680 cpufreq_suspended = false;
1685 pr_debug("%s: Resuming Governors\n", __func__);
1687 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1688 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1689 pr_err("%s: Failed to resume driver: %p\n", __func__,
1691 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1692 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1693 pr_err("%s: Failed to start governor for policy: %p\n",
1697 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
1698 * policy in list. It will verify that the current freq is in
1699 * sync with what we believe it to be.
1701 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1702 schedule_work(&policy->update);
1707 * cpufreq_get_current_driver - return current driver's name
1709 * Return the name string of the currently loaded cpufreq driver
1712 const char *cpufreq_get_current_driver(void)
1715 return cpufreq_driver->name;
1719 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1722 * cpufreq_get_driver_data - return current driver data
1724 * Return the private data of the currently loaded cpufreq
1725 * driver, or NULL if no cpufreq driver is loaded.
1727 void *cpufreq_get_driver_data(void)
1730 return cpufreq_driver->driver_data;
1734 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1736 /*********************************************************************
1737 * NOTIFIER LISTS INTERFACE *
1738 *********************************************************************/
1741 * cpufreq_register_notifier - register a driver with cpufreq
1742 * @nb: notifier function to register
1743 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1745 * Add a driver to one of two lists: either a list of drivers that
1746 * are notified about clock rate changes (once before and once after
1747 * the transition), or a list of drivers that are notified about
1748 * changes in cpufreq policy.
1750 * This function may sleep, and has the same return conditions as
1751 * blocking_notifier_chain_register.
1753 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1757 if (cpufreq_disabled())
1760 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1763 case CPUFREQ_TRANSITION_NOTIFIER:
1764 ret = srcu_notifier_chain_register(
1765 &cpufreq_transition_notifier_list, nb);
1767 case CPUFREQ_POLICY_NOTIFIER:
1768 ret = blocking_notifier_chain_register(
1769 &cpufreq_policy_notifier_list, nb);
1777 EXPORT_SYMBOL(cpufreq_register_notifier);
1780 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1781 * @nb: notifier block to be unregistered
1782 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1784 * Remove a driver from the CPU frequency notifier list.
1786 * This function may sleep, and has the same return conditions as
1787 * blocking_notifier_chain_unregister.
1789 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1793 if (cpufreq_disabled())
1797 case CPUFREQ_TRANSITION_NOTIFIER:
1798 ret = srcu_notifier_chain_unregister(
1799 &cpufreq_transition_notifier_list, nb);
1801 case CPUFREQ_POLICY_NOTIFIER:
1802 ret = blocking_notifier_chain_unregister(
1803 &cpufreq_policy_notifier_list, nb);
1811 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1814 /*********************************************************************
1816 *********************************************************************/
1818 /* Must set freqs->new to intermediate frequency */
1819 static int __target_intermediate(struct cpufreq_policy *policy,
1820 struct cpufreq_freqs *freqs, int index)
1824 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1826 /* We don't need to switch to intermediate freq */
1830 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1831 __func__, policy->cpu, freqs->old, freqs->new);
1833 cpufreq_freq_transition_begin(policy, freqs);
1834 ret = cpufreq_driver->target_intermediate(policy, index);
1835 cpufreq_freq_transition_end(policy, freqs, ret);
1838 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1844 static int __target_index(struct cpufreq_policy *policy,
1845 struct cpufreq_frequency_table *freq_table, int index)
1847 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1848 unsigned int intermediate_freq = 0;
1849 int retval = -EINVAL;
1852 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1854 /* Handle switching to intermediate frequency */
1855 if (cpufreq_driver->get_intermediate) {
1856 retval = __target_intermediate(policy, &freqs, index);
1860 intermediate_freq = freqs.new;
1861 /* Set old freq to intermediate */
1862 if (intermediate_freq)
1863 freqs.old = freqs.new;
1866 freqs.new = freq_table[index].frequency;
1867 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1868 __func__, policy->cpu, freqs.old, freqs.new);
1870 cpufreq_freq_transition_begin(policy, &freqs);
1873 retval = cpufreq_driver->target_index(policy, index);
1875 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1879 cpufreq_freq_transition_end(policy, &freqs, retval);
1882 * Failed after setting to intermediate freq? Driver should have
1883 * reverted back to initial frequency and so should we. Check
1884 * here for intermediate_freq instead of get_intermediate, in
1885 * case we have't switched to intermediate freq at all.
1887 if (unlikely(retval && intermediate_freq)) {
1888 freqs.old = intermediate_freq;
1889 freqs.new = policy->restore_freq;
1890 cpufreq_freq_transition_begin(policy, &freqs);
1891 cpufreq_freq_transition_end(policy, &freqs, 0);
1898 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1899 unsigned int target_freq,
1900 unsigned int relation)
1902 unsigned int old_target_freq = target_freq;
1903 int retval = -EINVAL;
1905 if (cpufreq_disabled())
1908 /* Make sure that target_freq is within supported range */
1909 if (target_freq > policy->max)
1910 target_freq = policy->max;
1911 if (target_freq < policy->min)
1912 target_freq = policy->min;
1914 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1915 policy->cpu, target_freq, relation, old_target_freq);
1918 * This might look like a redundant call as we are checking it again
1919 * after finding index. But it is left intentionally for cases where
1920 * exactly same freq is called again and so we can save on few function
1923 if (target_freq == policy->cur)
1926 /* Save last value to restore later on errors */
1927 policy->restore_freq = policy->cur;
1929 if (cpufreq_driver->target)
1930 retval = cpufreq_driver->target(policy, target_freq, relation);
1931 else if (cpufreq_driver->target_index) {
1932 struct cpufreq_frequency_table *freq_table;
1935 freq_table = cpufreq_frequency_get_table(policy->cpu);
1936 if (unlikely(!freq_table)) {
1937 pr_err("%s: Unable to find freq_table\n", __func__);
1941 retval = cpufreq_frequency_table_target(policy, freq_table,
1942 target_freq, relation, &index);
1943 if (unlikely(retval)) {
1944 pr_err("%s: Unable to find matching freq\n", __func__);
1948 if (freq_table[index].frequency == policy->cur) {
1953 retval = __target_index(policy, freq_table, index);
1959 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1961 int cpufreq_driver_target(struct cpufreq_policy *policy,
1962 unsigned int target_freq,
1963 unsigned int relation)
1967 down_write(&policy->rwsem);
1969 ret = __cpufreq_driver_target(policy, target_freq, relation);
1971 up_write(&policy->rwsem);
1975 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1977 static int __cpufreq_governor(struct cpufreq_policy *policy,
1982 /* Only must be defined when default governor is known to have latency
1983 restrictions, like e.g. conservative or ondemand.
1984 That this is the case is already ensured in Kconfig
1986 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1987 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1989 struct cpufreq_governor *gov = NULL;
1992 /* Don't start any governor operations if we are entering suspend */
1993 if (cpufreq_suspended)
1996 * Governor might not be initiated here if ACPI _PPC changed
1997 * notification happened, so check it.
1999 if (!policy->governor)
2002 if (policy->governor->max_transition_latency &&
2003 policy->cpuinfo.transition_latency >
2004 policy->governor->max_transition_latency) {
2008 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2009 policy->governor->name, gov->name);
2010 policy->governor = gov;
2014 if (event == CPUFREQ_GOV_POLICY_INIT)
2015 if (!try_module_get(policy->governor->owner))
2018 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2019 policy->cpu, event);
2021 mutex_lock(&cpufreq_governor_lock);
2022 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2023 || (!policy->governor_enabled
2024 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2025 mutex_unlock(&cpufreq_governor_lock);
2029 if (event == CPUFREQ_GOV_STOP)
2030 policy->governor_enabled = false;
2031 else if (event == CPUFREQ_GOV_START)
2032 policy->governor_enabled = true;
2034 mutex_unlock(&cpufreq_governor_lock);
2036 ret = policy->governor->governor(policy, event);
2039 if (event == CPUFREQ_GOV_POLICY_INIT)
2040 policy->governor->initialized++;
2041 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2042 policy->governor->initialized--;
2044 /* Restore original values */
2045 mutex_lock(&cpufreq_governor_lock);
2046 if (event == CPUFREQ_GOV_STOP)
2047 policy->governor_enabled = true;
2048 else if (event == CPUFREQ_GOV_START)
2049 policy->governor_enabled = false;
2050 mutex_unlock(&cpufreq_governor_lock);
2053 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2054 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2055 module_put(policy->governor->owner);
2060 int cpufreq_register_governor(struct cpufreq_governor *governor)
2067 if (cpufreq_disabled())
2070 mutex_lock(&cpufreq_governor_mutex);
2072 governor->initialized = 0;
2074 if (!find_governor(governor->name)) {
2076 list_add(&governor->governor_list, &cpufreq_governor_list);
2079 mutex_unlock(&cpufreq_governor_mutex);
2082 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2084 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2091 if (cpufreq_disabled())
2094 for_each_present_cpu(cpu) {
2095 if (cpu_online(cpu))
2097 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2098 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2101 mutex_lock(&cpufreq_governor_mutex);
2102 list_del(&governor->governor_list);
2103 mutex_unlock(&cpufreq_governor_mutex);
2106 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2109 /*********************************************************************
2110 * POLICY INTERFACE *
2111 *********************************************************************/
2114 * cpufreq_get_policy - get the current cpufreq_policy
2115 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2118 * Reads the current cpufreq policy.
2120 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2122 struct cpufreq_policy *cpu_policy;
2126 cpu_policy = cpufreq_cpu_get(cpu);
2130 memcpy(policy, cpu_policy, sizeof(*policy));
2132 cpufreq_cpu_put(cpu_policy);
2135 EXPORT_SYMBOL(cpufreq_get_policy);
2138 * policy : current policy.
2139 * new_policy: policy to be set.
2141 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2142 struct cpufreq_policy *new_policy)
2144 struct cpufreq_governor *old_gov;
2147 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2148 new_policy->cpu, new_policy->min, new_policy->max);
2150 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2152 if (new_policy->min > policy->max || new_policy->max < policy->min)
2155 /* verify the cpu speed can be set within this limit */
2156 ret = cpufreq_driver->verify(new_policy);
2160 /* adjust if necessary - all reasons */
2161 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2162 CPUFREQ_ADJUST, new_policy);
2164 /* adjust if necessary - hardware incompatibility*/
2165 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2166 CPUFREQ_INCOMPATIBLE, new_policy);
2169 * verify the cpu speed can be set within this limit, which might be
2170 * different to the first one
2172 ret = cpufreq_driver->verify(new_policy);
2176 /* notification of the new policy */
2177 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2178 CPUFREQ_NOTIFY, new_policy);
2180 policy->min = new_policy->min;
2181 policy->max = new_policy->max;
2183 pr_debug("new min and max freqs are %u - %u kHz\n",
2184 policy->min, policy->max);
2186 if (cpufreq_driver->setpolicy) {
2187 policy->policy = new_policy->policy;
2188 pr_debug("setting range\n");
2189 return cpufreq_driver->setpolicy(new_policy);
2192 if (new_policy->governor == policy->governor)
2195 pr_debug("governor switch\n");
2197 /* save old, working values */
2198 old_gov = policy->governor;
2199 /* end old governor */
2201 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2202 up_write(&policy->rwsem);
2203 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2204 down_write(&policy->rwsem);
2207 /* start new governor */
2208 policy->governor = new_policy->governor;
2209 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2210 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2213 up_write(&policy->rwsem);
2214 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2215 down_write(&policy->rwsem);
2218 /* new governor failed, so re-start old one */
2219 pr_debug("starting governor %s failed\n", policy->governor->name);
2221 policy->governor = old_gov;
2222 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2223 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2229 pr_debug("governor: change or update limits\n");
2230 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2234 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2235 * @cpu: CPU which shall be re-evaluated
2237 * Useful for policy notifiers which have different necessities
2238 * at different times.
2240 int cpufreq_update_policy(unsigned int cpu)
2242 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2243 struct cpufreq_policy new_policy;
2249 down_write(&policy->rwsem);
2251 pr_debug("updating policy for CPU %u\n", cpu);
2252 memcpy(&new_policy, policy, sizeof(*policy));
2253 new_policy.min = policy->user_policy.min;
2254 new_policy.max = policy->user_policy.max;
2255 new_policy.policy = policy->user_policy.policy;
2256 new_policy.governor = policy->user_policy.governor;
2259 * BIOS might change freq behind our back
2260 * -> ask driver for current freq and notify governors about a change
2262 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2263 new_policy.cur = cpufreq_driver->get(cpu);
2264 if (WARN_ON(!new_policy.cur)) {
2270 pr_debug("Driver did not initialize current freq\n");
2271 policy->cur = new_policy.cur;
2273 if (policy->cur != new_policy.cur && has_target())
2274 cpufreq_out_of_sync(policy, new_policy.cur);
2278 ret = cpufreq_set_policy(policy, &new_policy);
2281 up_write(&policy->rwsem);
2283 cpufreq_cpu_put(policy);
2286 EXPORT_SYMBOL(cpufreq_update_policy);
2288 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2289 unsigned long action, void *hcpu)
2291 unsigned int cpu = (unsigned long)hcpu;
2294 dev = get_cpu_device(cpu);
2296 switch (action & ~CPU_TASKS_FROZEN) {
2298 __cpufreq_add_dev(dev, NULL);
2301 case CPU_DOWN_PREPARE:
2302 __cpufreq_remove_dev_prepare(dev, NULL);
2306 __cpufreq_remove_dev_finish(dev, NULL);
2309 case CPU_DOWN_FAILED:
2310 __cpufreq_add_dev(dev, NULL);
2317 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2318 .notifier_call = cpufreq_cpu_callback,
2321 /*********************************************************************
2323 *********************************************************************/
2324 static int cpufreq_boost_set_sw(int state)
2326 struct cpufreq_frequency_table *freq_table;
2327 struct cpufreq_policy *policy;
2330 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2331 freq_table = cpufreq_frequency_get_table(policy->cpu);
2333 ret = cpufreq_frequency_table_cpuinfo(policy,
2336 pr_err("%s: Policy frequency update failed\n",
2340 policy->user_policy.max = policy->max;
2341 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2348 int cpufreq_boost_trigger_state(int state)
2350 unsigned long flags;
2353 if (cpufreq_driver->boost_enabled == state)
2356 write_lock_irqsave(&cpufreq_driver_lock, flags);
2357 cpufreq_driver->boost_enabled = state;
2358 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2360 ret = cpufreq_driver->set_boost(state);
2362 write_lock_irqsave(&cpufreq_driver_lock, flags);
2363 cpufreq_driver->boost_enabled = !state;
2364 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2366 pr_err("%s: Cannot %s BOOST\n",
2367 __func__, state ? "enable" : "disable");
2373 int cpufreq_boost_supported(void)
2375 if (likely(cpufreq_driver))
2376 return cpufreq_driver->boost_supported;
2380 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2382 int cpufreq_boost_enabled(void)
2384 return cpufreq_driver->boost_enabled;
2386 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2388 /*********************************************************************
2389 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2390 *********************************************************************/
2393 * cpufreq_register_driver - register a CPU Frequency driver
2394 * @driver_data: A struct cpufreq_driver containing the values#
2395 * submitted by the CPU Frequency driver.
2397 * Registers a CPU Frequency driver to this core code. This code
2398 * returns zero on success, -EBUSY when another driver got here first
2399 * (and isn't unregistered in the meantime).
2402 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2404 unsigned long flags;
2407 if (cpufreq_disabled())
2410 if (!driver_data || !driver_data->verify || !driver_data->init ||
2411 !(driver_data->setpolicy || driver_data->target_index ||
2412 driver_data->target) ||
2413 (driver_data->setpolicy && (driver_data->target_index ||
2414 driver_data->target)) ||
2415 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2418 pr_debug("trying to register driver %s\n", driver_data->name);
2420 write_lock_irqsave(&cpufreq_driver_lock, flags);
2421 if (cpufreq_driver) {
2422 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2425 cpufreq_driver = driver_data;
2426 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2428 if (driver_data->setpolicy)
2429 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2431 if (cpufreq_boost_supported()) {
2433 * Check if driver provides function to enable boost -
2434 * if not, use cpufreq_boost_set_sw as default
2436 if (!cpufreq_driver->set_boost)
2437 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2439 ret = cpufreq_sysfs_create_file(&boost.attr);
2441 pr_err("%s: cannot register global BOOST sysfs file\n",
2443 goto err_null_driver;
2447 ret = subsys_interface_register(&cpufreq_interface);
2449 goto err_boost_unreg;
2451 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2452 list_empty(&cpufreq_policy_list)) {
2453 /* if all ->init() calls failed, unregister */
2454 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2459 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2460 pr_debug("driver %s up and running\n", driver_data->name);
2464 subsys_interface_unregister(&cpufreq_interface);
2466 if (cpufreq_boost_supported())
2467 cpufreq_sysfs_remove_file(&boost.attr);
2469 write_lock_irqsave(&cpufreq_driver_lock, flags);
2470 cpufreq_driver = NULL;
2471 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2474 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2477 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2479 * Unregister the current CPUFreq driver. Only call this if you have
2480 * the right to do so, i.e. if you have succeeded in initialising before!
2481 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2482 * currently not initialised.
2484 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2486 unsigned long flags;
2488 if (!cpufreq_driver || (driver != cpufreq_driver))
2491 pr_debug("unregistering driver %s\n", driver->name);
2493 subsys_interface_unregister(&cpufreq_interface);
2494 if (cpufreq_boost_supported())
2495 cpufreq_sysfs_remove_file(&boost.attr);
2497 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2499 down_write(&cpufreq_rwsem);
2500 write_lock_irqsave(&cpufreq_driver_lock, flags);
2502 cpufreq_driver = NULL;
2504 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2505 up_write(&cpufreq_rwsem);
2509 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2512 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2513 * or mutexes when secondary CPUs are halted.
2515 static struct syscore_ops cpufreq_syscore_ops = {
2516 .shutdown = cpufreq_suspend,
2519 static int __init cpufreq_core_init(void)
2521 if (cpufreq_disabled())
2524 cpufreq_global_kobject = kobject_create();
2525 BUG_ON(!cpufreq_global_kobject);
2527 register_syscore_ops(&cpufreq_syscore_ops);
2531 core_initcall(cpufreq_core_init);