2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 /* Flag to suspend/resume CPUFreq governors */
49 static bool cpufreq_suspended;
51 static inline bool has_target(void)
53 return cpufreq_driver->target_index || cpufreq_driver->target;
* rwsem to guarantee that the cpufreq driver module doesn't unload during critical sections
60 static DECLARE_RWSEM(cpufreq_rwsem);
62 /* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
65 static unsigned int __cpufreq_get(unsigned int cpu);
66 static void handle_update(struct work_struct *work);
69 * Two notifier lists: the "policy" list is involved in the
70 * validation process for a new CPU frequency policy; the
71 * "transition" list for kernel code that needs to handle
72 * changes to devices when the CPU clock speed changes.
73 * The mutex locks both lists.
75 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
76 static struct srcu_notifier_head cpufreq_transition_notifier_list;
78 static bool init_cpufreq_transition_notifier_list_called;
79 static int __init init_cpufreq_transition_notifier_list(void)
81 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
82 init_cpufreq_transition_notifier_list_called = true;
85 pure_initcall(init_cpufreq_transition_notifier_list);
87 static int off __read_mostly;
88 static int cpufreq_disabled(void)
92 void disable_cpufreq(void)
96 static LIST_HEAD(cpufreq_governor_list);
97 static DEFINE_MUTEX(cpufreq_governor_mutex);
99 bool have_governor_per_policy(void)
101 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
103 EXPORT_SYMBOL_GPL(have_governor_per_policy);
105 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
107 if (have_governor_per_policy())
108 return &policy->kobj;
110 return cpufreq_global_kobject;
112 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
114 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
120 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
122 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
129 idle_time = cur_wall_time - busy_time;
131 *wall = cputime_to_usecs(cur_wall_time);
133 return cputime_to_usecs(idle_time);
136 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
138 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
140 if (idle_time == -1ULL)
141 return get_cpu_idle_time_jiffy(cpu, wall);
143 idle_time += get_cpu_iowait_time_us(cpu, wall);
147 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
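/*
 * Illustrative sketch (not part of the original file): how a sampling
 * governor might derive a load estimate from two calls to
 * get_cpu_idle_time(). The variable names and the 100 ms sampling period
 * are hypothetical.
 *
 *	u64 wall, idle, prev_wall, prev_idle;
 *	unsigned int load;
 *
 *	prev_idle = get_cpu_idle_time(cpu, &prev_wall, 0);
 *	msleep(100);
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *
 *	// busy share (in percent) of the elapsed wall time
 *	if (wall > prev_wall)
 *		load = 100 * (unsigned int)((wall - prev_wall) -
 *				(idle - prev_idle)) /
 *				(unsigned int)(wall - prev_wall);
 */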
* This is a generic cpufreq init() routine which can be used by cpufreq
* drivers of SMP systems. It will do the following:
* - validate & show the frequency table passed
* - set the policy's transition latency
* - fill policy->cpus with all possible CPUs
156 int cpufreq_generic_init(struct cpufreq_policy *policy,
157 struct cpufreq_frequency_table *table,
158 unsigned int transition_latency)
162 ret = cpufreq_table_validate_and_show(policy, table);
164 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
168 policy->cpuinfo.transition_latency = transition_latency;
* The driver only supports the SMP configuration where all processors
* share the clock and voltage.
174 cpumask_setall(policy->cpus);
178 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
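/*
 * Illustrative sketch (not part of the original file): a minimal ->init()
 * callback of a clock-based driver built on cpufreq_generic_init(). The
 * "foo" names and the 300 us transition latency are hypothetical.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;	// hypothetical clk obtained at probe time
 *		return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
 *	}
 */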
180 unsigned int cpufreq_generic_get(unsigned int cpu)
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
184 if (!policy || IS_ERR(policy->clk)) {
185 pr_err("%s: No %s associated to cpu: %d\n",
186 __func__, policy ? "clk" : "policy", cpu);
190 return clk_get_rate(policy->clk) / 1000;
192 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
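/*
 * Illustrative sketch (not part of the original file): cpufreq_generic_get()
 * can only report a frequency if the driver stored a valid clock in
 * policy->clk during ->init(). Names are hypothetical:
 *
 *	policy->clk = devm_clk_get(cpu_dev, NULL);
 *	if (IS_ERR(policy->clk))
 *		return PTR_ERR(policy->clk);
 *	// ...the driver can then simply set .get = cpufreq_generic_get
 */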
194 /* Only for cpufreq core internal use */
195 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
197 return per_cpu(cpufreq_cpu_data, cpu);
200 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
202 struct cpufreq_policy *policy = NULL;
205 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
208 if (!down_read_trylock(&cpufreq_rwsem))
211 /* get the cpufreq driver */
212 read_lock_irqsave(&cpufreq_driver_lock, flags);
214 if (cpufreq_driver) {
216 policy = per_cpu(cpufreq_cpu_data, cpu);
218 kobject_get(&policy->kobj);
221 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
224 up_read(&cpufreq_rwsem);
228 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
230 void cpufreq_cpu_put(struct cpufreq_policy *policy)
232 if (cpufreq_disabled())
235 kobject_put(&policy->kobj);
236 up_read(&cpufreq_rwsem);
238 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
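/*
 * Illustrative sketch (not part of the original file): the canonical
 * get/put pattern for briefly inspecting a policy from other kernel code.
 * The reference taken by cpufreq_cpu_get() pins the policy kobject and
 * holds cpufreq_rwsem, so it must be dropped promptly:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		unsigned int cur = policy->cur;	// safe while the reference is held
 *		cpufreq_cpu_put(policy);
 *	}
 */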
240 /*********************************************************************
241 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
242 *********************************************************************/
245 * adjust_jiffies - adjust the system "loops_per_jiffy"
247 * This function alters the system "loops_per_jiffy" for the clock
248 * speed change. Note that loops_per_jiffy cannot be updated on SMP
249 * systems as each CPU might be scaled differently. So, use the arch
250 * per-CPU loops_per_jiffy value wherever possible.
253 static unsigned long l_p_j_ref;
254 static unsigned int l_p_j_ref_freq;
256 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
258 if (ci->flags & CPUFREQ_CONST_LOOPS)
261 if (!l_p_j_ref_freq) {
262 l_p_j_ref = loops_per_jiffy;
263 l_p_j_ref_freq = ci->old;
264 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
265 l_p_j_ref, l_p_j_ref_freq);
267 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
268 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
270 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
271 loops_per_jiffy, ci->new);
275 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
281 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
282 struct cpufreq_freqs *freqs, unsigned int state)
284 BUG_ON(irqs_disabled());
286 if (cpufreq_disabled())
289 freqs->flags = cpufreq_driver->flags;
290 pr_debug("notification %u of frequency transition to %u kHz\n",
295 case CPUFREQ_PRECHANGE:
296 /* detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
300 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
301 if ((policy) && (policy->cpu == freqs->cpu) &&
302 (policy->cur) && (policy->cur != freqs->old)) {
303 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
304 freqs->old, policy->cur);
305 freqs->old = policy->cur;
308 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
309 CPUFREQ_PRECHANGE, freqs);
310 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
313 case CPUFREQ_POSTCHANGE:
314 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
315 pr_debug("FREQ: %lu - CPU: %lu\n",
316 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
317 trace_cpu_frequency(freqs->new, freqs->cpu);
318 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
319 CPUFREQ_POSTCHANGE, freqs);
320 if (likely(policy) && likely(policy->cpu == freqs->cpu))
321 policy->cur = freqs->new;
327 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
328 * on frequency transition.
330 * This function calls the transition notifiers and the "adjust_jiffies"
* function. It is called twice on all CPU frequency changes that have
* external effects.
334 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
335 struct cpufreq_freqs *freqs, unsigned int state)
337 for_each_cpu(freqs->cpu, policy->cpus)
338 __cpufreq_notify_transition(policy, freqs, state);
/* Do post notifications when there is a chance that the transition has failed */
342 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
343 struct cpufreq_freqs *freqs, int transition_failed)
345 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
346 if (!transition_failed)
349 swap(freqs->old, freqs->new);
350 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
351 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
354 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
355 struct cpufreq_freqs *freqs)
359 * Catch double invocations of _begin() which lead to self-deadlock.
360 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
361 * doesn't invoke _begin() on their behalf, and hence the chances of
362 * double invocations are very low. Moreover, there are scenarios
363 * where these checks can emit false-positive warnings in these
364 * drivers; so we avoid that by skipping them altogether.
366 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
367 && current == policy->transition_task);
370 wait_event(policy->transition_wait, !policy->transition_ongoing);
372 spin_lock(&policy->transition_lock);
374 if (unlikely(policy->transition_ongoing)) {
375 spin_unlock(&policy->transition_lock);
379 policy->transition_ongoing = true;
380 policy->transition_task = current;
382 spin_unlock(&policy->transition_lock);
384 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
386 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
388 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
389 struct cpufreq_freqs *freqs, int transition_failed)
391 if (unlikely(WARN_ON(!policy->transition_ongoing)))
394 cpufreq_notify_post_transition(policy, freqs, transition_failed);
396 policy->transition_ongoing = false;
397 policy->transition_task = NULL;
399 wake_up(&policy->transition_wait);
401 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
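/*
 * Illustrative sketch (not part of the original file): a driver that
 * implements the raw ->target() callback must bracket the hardware switch
 * with these helpers itself; for ->target_index() drivers the core does so
 * in __target_index(). foo_set_rate_hw() is a hypothetical hardware write.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_freq };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_set_rate_hw(new_freq);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */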
404 /*********************************************************************
406 *********************************************************************/
407 static ssize_t show_boost(struct kobject *kobj,
408 struct attribute *attr, char *buf)
410 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
413 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
414 const char *buf, size_t count)
418 ret = sscanf(buf, "%d", &enable);
419 if (ret != 1 || enable < 0 || enable > 1)
422 if (cpufreq_boost_trigger_state(enable)) {
423 pr_err("%s: Cannot %s BOOST!\n",
424 __func__, enable ? "enable" : "disable");
428 pr_debug("%s: cpufreq BOOST %s\n",
429 __func__, enable ? "enabled" : "disabled");
433 define_one_global_rw(boost);
435 static struct cpufreq_governor *__find_governor(const char *str_governor)
437 struct cpufreq_governor *t;
439 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
440 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
447 * cpufreq_parse_governor - parse a governor string
449 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
450 struct cpufreq_governor **governor)
457 if (cpufreq_driver->setpolicy) {
458 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
459 *policy = CPUFREQ_POLICY_PERFORMANCE;
461 } else if (!strnicmp(str_governor, "powersave",
463 *policy = CPUFREQ_POLICY_POWERSAVE;
466 } else if (has_target()) {
467 struct cpufreq_governor *t;
469 mutex_lock(&cpufreq_governor_mutex);
471 t = __find_governor(str_governor);
476 mutex_unlock(&cpufreq_governor_mutex);
477 ret = request_module("cpufreq_%s", str_governor);
478 mutex_lock(&cpufreq_governor_mutex);
481 t = __find_governor(str_governor);
489 mutex_unlock(&cpufreq_governor_mutex);
496 * cpufreq_per_cpu_attr_read() / show_##file_name() -
497 * print out cpufreq information
* Write out information from cpufreq_driver->policy[cpu]; object must be
* "unsigned int".
503 #define show_one(file_name, object) \
504 static ssize_t show_##file_name \
505 (struct cpufreq_policy *policy, char *buf) \
507 return sprintf(buf, "%u\n", policy->object); \
510 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
511 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
512 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
513 show_one(scaling_min_freq, min);
514 show_one(scaling_max_freq, max);
515 show_one(scaling_cur_freq, cur);
517 static int cpufreq_set_policy(struct cpufreq_policy *policy,
518 struct cpufreq_policy *new_policy);
521 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
523 #define store_one(file_name, object) \
524 static ssize_t store_##file_name \
525 (struct cpufreq_policy *policy, const char *buf, size_t count) \
528 struct cpufreq_policy new_policy; \
530 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
534 ret = sscanf(buf, "%u", &new_policy.object); \
538 ret = cpufreq_set_policy(policy, &new_policy); \
539 policy->user_policy.object = policy->object; \
541 return ret ? ret : count; \
544 store_one(scaling_min_freq, min);
545 store_one(scaling_max_freq, max);
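/*
 * For reference, show_one(scaling_max_freq, max) above expands (roughly) to:
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * and store_one() generates the matching write handler, which parses the
 * user's value into a copy of the policy and applies it via
 * cpufreq_set_policy().
 */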
548 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
550 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
553 unsigned int cur_freq = __cpufreq_get(policy->cpu);
555 return sprintf(buf, "<unknown>");
556 return sprintf(buf, "%u\n", cur_freq);
560 * show_scaling_governor - show the current policy for the specified CPU
562 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
564 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
565 return sprintf(buf, "powersave\n");
566 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
567 return sprintf(buf, "performance\n");
568 else if (policy->governor)
569 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
570 policy->governor->name);
575 * store_scaling_governor - store policy for the specified CPU
577 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
578 const char *buf, size_t count)
581 char str_governor[16];
582 struct cpufreq_policy new_policy;
584 ret = cpufreq_get_policy(&new_policy, policy->cpu);
588 ret = sscanf(buf, "%15s", str_governor);
592 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
593 &new_policy.governor))
596 ret = cpufreq_set_policy(policy, &new_policy);
598 policy->user_policy.policy = policy->policy;
599 policy->user_policy.governor = policy->governor;
608 * show_scaling_driver - show the cpufreq driver currently loaded
610 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
612 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
616 * show_scaling_available_governors - show the available CPUfreq governors
618 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
622 struct cpufreq_governor *t;
625 i += sprintf(buf, "performance powersave");
629 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
630 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
631 - (CPUFREQ_NAME_LEN + 2)))
633 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
636 i += sprintf(&buf[i], "\n");
640 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
645 for_each_cpu(cpu, mask) {
647 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
648 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
649 if (i >= (PAGE_SIZE - 5))
652 i += sprintf(&buf[i], "\n");
655 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
658 * show_related_cpus - show the CPUs affected by each transition even if
659 * hw coordination is in use
661 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
663 return cpufreq_show_cpus(policy->related_cpus, buf);
667 * show_affected_cpus - show the CPUs affected by each transition
669 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
671 return cpufreq_show_cpus(policy->cpus, buf);
674 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
675 const char *buf, size_t count)
677 unsigned int freq = 0;
680 if (!policy->governor || !policy->governor->store_setspeed)
683 ret = sscanf(buf, "%u", &freq);
687 policy->governor->store_setspeed(policy, freq);
692 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
694 if (!policy->governor || !policy->governor->show_setspeed)
695 return sprintf(buf, "<unsupported>\n");
697 return policy->governor->show_setspeed(policy, buf);
701 * show_bios_limit - show the current cpufreq HW/BIOS limitation
703 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
707 if (cpufreq_driver->bios_limit) {
708 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
710 return sprintf(buf, "%u\n", limit);
712 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
715 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
716 cpufreq_freq_attr_ro(cpuinfo_min_freq);
717 cpufreq_freq_attr_ro(cpuinfo_max_freq);
718 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
719 cpufreq_freq_attr_ro(scaling_available_governors);
720 cpufreq_freq_attr_ro(scaling_driver);
721 cpufreq_freq_attr_ro(scaling_cur_freq);
722 cpufreq_freq_attr_ro(bios_limit);
723 cpufreq_freq_attr_ro(related_cpus);
724 cpufreq_freq_attr_ro(affected_cpus);
725 cpufreq_freq_attr_rw(scaling_min_freq);
726 cpufreq_freq_attr_rw(scaling_max_freq);
727 cpufreq_freq_attr_rw(scaling_governor);
728 cpufreq_freq_attr_rw(scaling_setspeed);
730 static struct attribute *default_attrs[] = {
731 &cpuinfo_min_freq.attr,
732 &cpuinfo_max_freq.attr,
733 &cpuinfo_transition_latency.attr,
734 &scaling_min_freq.attr,
735 &scaling_max_freq.attr,
738 &scaling_governor.attr,
739 &scaling_driver.attr,
740 &scaling_available_governors.attr,
741 &scaling_setspeed.attr,
745 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
746 #define to_attr(a) container_of(a, struct freq_attr, attr)
748 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
750 struct cpufreq_policy *policy = to_policy(kobj);
751 struct freq_attr *fattr = to_attr(attr);
754 if (!down_read_trylock(&cpufreq_rwsem))
757 down_read(&policy->rwsem);
760 ret = fattr->show(policy, buf);
764 up_read(&policy->rwsem);
765 up_read(&cpufreq_rwsem);
770 static ssize_t store(struct kobject *kobj, struct attribute *attr,
771 const char *buf, size_t count)
773 struct cpufreq_policy *policy = to_policy(kobj);
774 struct freq_attr *fattr = to_attr(attr);
775 ssize_t ret = -EINVAL;
779 if (!cpu_online(policy->cpu))
782 if (!down_read_trylock(&cpufreq_rwsem))
785 down_write(&policy->rwsem);
788 ret = fattr->store(policy, buf, count);
792 up_write(&policy->rwsem);
794 up_read(&cpufreq_rwsem);
801 static void cpufreq_sysfs_release(struct kobject *kobj)
803 struct cpufreq_policy *policy = to_policy(kobj);
804 pr_debug("last reference is dropped\n");
805 complete(&policy->kobj_unregister);
808 static const struct sysfs_ops sysfs_ops = {
813 static struct kobj_type ktype_cpufreq = {
814 .sysfs_ops = &sysfs_ops,
815 .default_attrs = default_attrs,
816 .release = cpufreq_sysfs_release,
819 struct kobject *cpufreq_global_kobject;
820 EXPORT_SYMBOL(cpufreq_global_kobject);
822 static int cpufreq_global_kobject_usage;
824 int cpufreq_get_global_kobject(void)
826 if (!cpufreq_global_kobject_usage++)
827 return kobject_add(cpufreq_global_kobject,
828 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
832 EXPORT_SYMBOL(cpufreq_get_global_kobject);
834 void cpufreq_put_global_kobject(void)
836 if (!--cpufreq_global_kobject_usage)
837 kobject_del(cpufreq_global_kobject);
839 EXPORT_SYMBOL(cpufreq_put_global_kobject);
841 int cpufreq_sysfs_create_file(const struct attribute *attr)
843 int ret = cpufreq_get_global_kobject();
846 ret = sysfs_create_file(cpufreq_global_kobject, attr);
848 cpufreq_put_global_kobject();
853 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
855 void cpufreq_sysfs_remove_file(const struct attribute *attr)
857 sysfs_remove_file(cpufreq_global_kobject, attr);
858 cpufreq_put_global_kobject();
860 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
862 /* symlink affected CPUs */
863 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
868 for_each_cpu(j, policy->cpus) {
869 struct device *cpu_dev;
871 if (j == policy->cpu)
874 pr_debug("Adding link for CPU: %u\n", j);
875 cpu_dev = get_cpu_device(j);
876 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
884 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
887 struct freq_attr **drv_attr;
890 /* prepare interface data */
891 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
892 &dev->kobj, "cpufreq");
896 /* set up files for this cpu device */
897 drv_attr = cpufreq_driver->attr;
898 while ((drv_attr) && (*drv_attr)) {
899 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
901 goto err_out_kobj_put;
904 if (cpufreq_driver->get) {
905 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
907 goto err_out_kobj_put;
910 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
912 goto err_out_kobj_put;
914 if (cpufreq_driver->bios_limit) {
915 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
917 goto err_out_kobj_put;
920 ret = cpufreq_add_dev_symlink(policy);
922 goto err_out_kobj_put;
927 kobject_put(&policy->kobj);
928 wait_for_completion(&policy->kobj_unregister);
932 static void cpufreq_init_policy(struct cpufreq_policy *policy)
934 struct cpufreq_governor *gov = NULL;
935 struct cpufreq_policy new_policy;
938 memcpy(&new_policy, policy, sizeof(*policy));
940 /* Update governor of new_policy to the governor used before hotplug */
941 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
943 pr_debug("Restoring governor %s for cpu %d\n",
944 policy->governor->name, policy->cpu);
946 gov = CPUFREQ_DEFAULT_GOVERNOR;
948 new_policy.governor = gov;
/* Use the default policy if it's valid. */
951 if (cpufreq_driver->setpolicy)
952 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
954 /* set default policy */
955 ret = cpufreq_set_policy(policy, &new_policy);
957 pr_debug("setting policy failed\n");
958 if (cpufreq_driver->exit)
959 cpufreq_driver->exit(policy);
963 #ifdef CONFIG_HOTPLUG_CPU
964 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
965 unsigned int cpu, struct device *dev)
971 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
973 pr_err("%s: Failed to stop governor\n", __func__);
978 down_write(&policy->rwsem);
980 write_lock_irqsave(&cpufreq_driver_lock, flags);
982 cpumask_set_cpu(cpu, policy->cpus);
983 per_cpu(cpufreq_cpu_data, cpu) = policy;
984 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
986 up_write(&policy->rwsem);
989 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
991 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
994 pr_err("%s: Failed to start governor\n", __func__);
999 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
1003 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1005 struct cpufreq_policy *policy;
1006 unsigned long flags;
1008 read_lock_irqsave(&cpufreq_driver_lock, flags);
1010 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1012 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1014 policy->governor = NULL;
1019 static struct cpufreq_policy *cpufreq_policy_alloc(void)
1021 struct cpufreq_policy *policy;
1023 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1027 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1028 goto err_free_policy;
1030 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1031 goto err_free_cpumask;
1033 INIT_LIST_HEAD(&policy->policy_list);
1034 init_rwsem(&policy->rwsem);
1035 spin_lock_init(&policy->transition_lock);
1036 init_waitqueue_head(&policy->transition_wait);
1041 free_cpumask_var(policy->cpus);
1048 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1050 struct kobject *kobj;
1051 struct completion *cmp;
1053 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1054 CPUFREQ_REMOVE_POLICY, policy);
1056 down_read(&policy->rwsem);
1057 kobj = &policy->kobj;
1058 cmp = &policy->kobj_unregister;
1059 up_read(&policy->rwsem);
1063 * We need to make sure that the underlying kobj is
1064 * actually not referenced anymore by anybody before we
1065 * proceed with unloading.
1067 pr_debug("waiting for dropping of refcount\n");
1068 wait_for_completion(cmp);
1069 pr_debug("wait complete\n");
1072 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1074 free_cpumask_var(policy->related_cpus);
1075 free_cpumask_var(policy->cpus);
1079 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1080 struct device *cpu_dev)
1084 if (WARN_ON(cpu == policy->cpu))
1087 /* Move kobject to the new policy->cpu */
1088 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1090 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1094 down_write(&policy->rwsem);
1096 policy->last_cpu = policy->cpu;
1099 up_write(&policy->rwsem);
1101 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1102 CPUFREQ_UPDATE_POLICY_CPU, policy);
1107 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1109 unsigned int j, cpu = dev->id;
1111 struct cpufreq_policy *policy;
1112 unsigned long flags;
1113 bool recover_policy = cpufreq_suspended;
1114 #ifdef CONFIG_HOTPLUG_CPU
1115 struct cpufreq_policy *tpolicy;
1118 if (cpu_is_offline(cpu))
1121 pr_debug("adding CPU %u\n", cpu);
1124 /* check whether a different CPU already registered this
1125 * CPU because it is in the same boat. */
1126 policy = cpufreq_cpu_get(cpu);
1127 if (unlikely(policy)) {
1128 cpufreq_cpu_put(policy);
1133 if (!down_read_trylock(&cpufreq_rwsem))
1136 #ifdef CONFIG_HOTPLUG_CPU
1137 /* Check if this cpu was hot-unplugged earlier and has siblings */
1138 read_lock_irqsave(&cpufreq_driver_lock, flags);
1139 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1140 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1141 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1142 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1143 up_read(&cpufreq_rwsem);
1147 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1151 * Restore the saved policy when doing light-weight init and fall back
1152 * to the full init if that fails.
1154 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1156 recover_policy = false;
1157 policy = cpufreq_policy_alloc();
1163 * In the resume path, since we restore a saved policy, the assignment
1164 * to policy->cpu is like an update of the existing policy, rather than
1165 * the creation of a brand new one. So we need to perform this update
1166 * by invoking update_policy_cpu().
1168 if (recover_policy && cpu != policy->cpu)
1169 WARN_ON(update_policy_cpu(policy, cpu, dev));
1173 cpumask_copy(policy->cpus, cpumask_of(cpu));
1175 init_completion(&policy->kobj_unregister);
1176 INIT_WORK(&policy->update, handle_update);
1178 /* call driver. From then on the cpufreq must be able
1179 * to accept all calls to ->verify and ->setpolicy for this CPU
1181 ret = cpufreq_driver->init(policy);
1183 pr_debug("initialization failed\n");
1184 goto err_set_policy_cpu;
/* related_cpus should at least contain policy->cpus */
1188 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
* Affected CPUs must always be the ones that are online. We aren't
* managing offline CPUs here.
1194 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1196 if (!recover_policy) {
1197 policy->user_policy.min = policy->min;
1198 policy->user_policy.max = policy->max;
1201 down_write(&policy->rwsem);
1202 write_lock_irqsave(&cpufreq_driver_lock, flags);
1203 for_each_cpu(j, policy->cpus)
1204 per_cpu(cpufreq_cpu_data, j) = policy;
1205 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1207 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1208 policy->cur = cpufreq_driver->get(policy->cpu);
1210 pr_err("%s: ->get() failed\n", __func__);
* Sometimes boot loaders set the CPU frequency to a value outside of the
* frequency table known to the cpufreq core. In such cases the CPU might
* be unstable if it has to run at that frequency for a long time, so it's
* better to set it to a frequency listed in the table. This also keeps
* cpufreq stats consistent, as cpufreq-stats would otherwise fail to
* register because the current CPU frequency isn't found in the table.
*
* Because we don't want this change to affect the boot process badly, we
* go for the next frequency which is >= policy->cur ('cur' must be set by
* now, otherwise we would end up setting the frequency to the lowest table
* entry, as 'cur' is initialized to zero).
*
* We pass the target frequency as "policy->cur - 1", since otherwise
* __cpufreq_driver_target() would simply bail out, as policy->cur would be
* equal to the target frequency.
1233 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
/* Are we running at an unknown frequency? */
1236 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1237 if (ret == -EINVAL) {
1238 /* Warn user and fix it */
1239 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1240 __func__, policy->cpu, policy->cur);
1241 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1242 CPUFREQ_RELATION_L);
* Reaching here a few seconds after boot does not
* mean that the system will remain stable at the
* "unknown" frequency for much longer. Hence, a BUG_ON().
1250 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1251 __func__, policy->cpu, policy->cur);
1255 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1256 CPUFREQ_START, policy);
1258 if (!recover_policy) {
1259 ret = cpufreq_add_dev_interface(policy, dev);
1261 goto err_out_unregister;
1262 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1263 CPUFREQ_CREATE_POLICY, policy);
1266 write_lock_irqsave(&cpufreq_driver_lock, flags);
1267 list_add(&policy->policy_list, &cpufreq_policy_list);
1268 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1270 cpufreq_init_policy(policy);
1272 if (!recover_policy) {
1273 policy->user_policy.policy = policy->policy;
1274 policy->user_policy.governor = policy->governor;
1276 up_write(&policy->rwsem);
1278 kobject_uevent(&policy->kobj, KOBJ_ADD);
1279 up_read(&cpufreq_rwsem);
1281 pr_debug("initialization complete\n");
1287 write_lock_irqsave(&cpufreq_driver_lock, flags);
1288 for_each_cpu(j, policy->cpus)
1289 per_cpu(cpufreq_cpu_data, j) = NULL;
1290 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1292 if (cpufreq_driver->exit)
1293 cpufreq_driver->exit(policy);
1295 if (recover_policy) {
1296 /* Do not leave stale fallback data behind. */
1297 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1298 cpufreq_policy_put_kobj(policy);
1300 cpufreq_policy_free(policy);
1303 up_read(&cpufreq_rwsem);
1309 * cpufreq_add_dev - add a CPU device
1311 * Adds the cpufreq interface for a CPU device.
1313 * The Oracle says: try running cpufreq registration/unregistration concurrently
* with cpu hotplugging and all hell will break loose. Tried to clean this
1315 * mess up, but more thorough testing is needed. - Mathieu
1317 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1319 return __cpufreq_add_dev(dev, sif);
1322 static int __cpufreq_remove_dev_prepare(struct device *dev,
1323 struct subsys_interface *sif)
1325 unsigned int cpu = dev->id, cpus;
1327 unsigned long flags;
1328 struct cpufreq_policy *policy;
1330 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1332 write_lock_irqsave(&cpufreq_driver_lock, flags);
1334 policy = per_cpu(cpufreq_cpu_data, cpu);
1336 /* Save the policy somewhere when doing a light-weight tear-down */
1337 if (cpufreq_suspended)
1338 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1340 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1343 pr_debug("%s: No cpu_data found\n", __func__);
1348 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1350 pr_err("%s: Failed to stop governor\n", __func__);
1355 if (!cpufreq_driver->setpolicy)
1356 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1357 policy->governor->name, CPUFREQ_NAME_LEN);
1359 down_read(&policy->rwsem);
1360 cpus = cpumask_weight(policy->cpus);
1361 up_read(&policy->rwsem);
1363 if (cpu != policy->cpu) {
1364 sysfs_remove_link(&dev->kobj, "cpufreq");
1365 } else if (cpus > 1) {
1366 /* Nominate new CPU */
1367 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1368 struct device *cpu_dev = get_cpu_device(new_cpu);
1370 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1371 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1373 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1375 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1376 __func__, cpu_dev->id);
1380 if (!cpufreq_suspended)
1381 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1382 __func__, new_cpu, cpu);
1383 } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
1384 cpufreq_driver->stop_cpu(policy);
1390 static int __cpufreq_remove_dev_finish(struct device *dev,
1391 struct subsys_interface *sif)
1393 unsigned int cpu = dev->id, cpus;
1395 unsigned long flags;
1396 struct cpufreq_policy *policy;
1398 read_lock_irqsave(&cpufreq_driver_lock, flags);
1399 policy = per_cpu(cpufreq_cpu_data, cpu);
1400 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1403 pr_debug("%s: No cpu_data found\n", __func__);
1407 down_write(&policy->rwsem);
1408 cpus = cpumask_weight(policy->cpus);
1411 cpumask_clear_cpu(cpu, policy->cpus);
1412 up_write(&policy->rwsem);
1414 /* If cpu is last user of policy, free policy */
1417 ret = __cpufreq_governor(policy,
1418 CPUFREQ_GOV_POLICY_EXIT);
1420 pr_err("%s: Failed to exit governor\n",
1426 if (!cpufreq_suspended)
1427 cpufreq_policy_put_kobj(policy);
1430 * Perform the ->exit() even during light-weight tear-down,
1431 * since this is a core component, and is essential for the
1432 * subsequent light-weight ->init() to succeed.
1434 if (cpufreq_driver->exit)
1435 cpufreq_driver->exit(policy);
1437 /* Remove policy from list of active policies */
1438 write_lock_irqsave(&cpufreq_driver_lock, flags);
1439 list_del(&policy->policy_list);
1440 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1442 if (!cpufreq_suspended)
1443 cpufreq_policy_free(policy);
1444 } else if (has_target()) {
1445 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1447 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1450 pr_err("%s: Failed to start governor\n", __func__);
1455 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1460 * cpufreq_remove_dev - remove a CPU device
1462 * Removes the cpufreq interface for a CPU device.
1464 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1466 unsigned int cpu = dev->id;
1469 if (cpu_is_offline(cpu))
1472 ret = __cpufreq_remove_dev_prepare(dev, sif);
1475 ret = __cpufreq_remove_dev_finish(dev, sif);
1480 static void handle_update(struct work_struct *work)
1482 struct cpufreq_policy *policy =
1483 container_of(work, struct cpufreq_policy, update);
1484 unsigned int cpu = policy->cpu;
1485 pr_debug("handle_update for cpu %u called\n", cpu);
1486 cpufreq_update_policy(cpu);
* cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
* in deep trouble.
1493 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1494 * @new_freq: CPU frequency the CPU actually runs at
* We adjust to the current frequency first, and need to clean up later.
* So either call cpufreq_update_policy() or schedule handle_update().
1499 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1500 unsigned int new_freq)
1502 struct cpufreq_policy *policy;
1503 struct cpufreq_freqs freqs;
1504 unsigned long flags;
1506 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1507 old_freq, new_freq);
1509 freqs.old = old_freq;
1510 freqs.new = new_freq;
1512 read_lock_irqsave(&cpufreq_driver_lock, flags);
1513 policy = per_cpu(cpufreq_cpu_data, cpu);
1514 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1516 cpufreq_freq_transition_begin(policy, &freqs);
1517 cpufreq_freq_transition_end(policy, &freqs, 0);
1521 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1524 * This is the last known freq, without actually getting it from the driver.
* Return value will be the same as what is shown in scaling_cur_freq in sysfs.
1527 unsigned int cpufreq_quick_get(unsigned int cpu)
1529 struct cpufreq_policy *policy;
1530 unsigned int ret_freq = 0;
1532 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1533 return cpufreq_driver->get(cpu);
1535 policy = cpufreq_cpu_get(cpu);
1537 ret_freq = policy->cur;
1538 cpufreq_cpu_put(policy);
1543 EXPORT_SYMBOL(cpufreq_quick_get);
1546 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1549 * Just return the max possible frequency for a given CPU.
1551 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1553 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1554 unsigned int ret_freq = 0;
1557 ret_freq = policy->max;
1558 cpufreq_cpu_put(policy);
1563 EXPORT_SYMBOL(cpufreq_quick_get_max);
1565 static unsigned int __cpufreq_get(unsigned int cpu)
1567 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1568 unsigned int ret_freq = 0;
1570 if (!cpufreq_driver->get)
1573 ret_freq = cpufreq_driver->get(cpu);
1575 if (ret_freq && policy->cur &&
1576 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1577 /* verify no discrepancy between actual and
1578 saved value exists */
1579 if (unlikely(ret_freq != policy->cur)) {
1580 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1581 schedule_work(&policy->update);
1589 * cpufreq_get - get the current CPU frequency (in kHz)
* Get the current (static) CPU frequency.
1594 unsigned int cpufreq_get(unsigned int cpu)
1596 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1597 unsigned int ret_freq = 0;
1600 down_read(&policy->rwsem);
1601 ret_freq = __cpufreq_get(cpu);
1602 up_read(&policy->rwsem);
1604 cpufreq_cpu_put(policy);
1609 EXPORT_SYMBOL(cpufreq_get);
1611 static struct subsys_interface cpufreq_interface = {
1613 .subsys = &cpu_subsys,
1614 .add_dev = cpufreq_add_dev,
1615 .remove_dev = cpufreq_remove_dev,
* In case the platform wants some specific frequency to be configured
* during suspend.
1622 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1626 if (!policy->suspend_freq) {
1627 pr_err("%s: suspend_freq can't be zero\n", __func__);
1631 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1632 policy->suspend_freq);
1634 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1635 CPUFREQ_RELATION_H);
1637 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1638 __func__, policy->suspend_freq, ret);
1642 EXPORT_SYMBOL(cpufreq_generic_suspend);
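/*
 * Illustrative sketch (not part of the original file): a driver opting in
 * to this helper picks a safe frequency at init time and wires up the
 * callback. The "foo" names are hypothetical.
 *
 *	// in foo_cpufreq_init():
 *	policy->suspend_freq = foo_safe_freq_khz;
 *
 *	static struct cpufreq_driver foo_driver = {
 *		// ...
 *		.suspend = cpufreq_generic_suspend,
 *	};
 */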
1645 * cpufreq_suspend() - Suspend CPUFreq governors
* Called during system-wide Suspend/Hibernate cycles for suspending governors,
* as some platforms can't change frequency after this point in the suspend
* cycle, because some of the devices they use for changing frequency (e.g.
* i2c, regulators) are suspended quickly after this point.
1652 void cpufreq_suspend(void)
1654 struct cpufreq_policy *policy;
1656 if (!cpufreq_driver)
1662 pr_debug("%s: Suspending Governors\n", __func__);
1664 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1665 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1666 pr_err("%s: Failed to stop governor for policy: %p\n",
1668 else if (cpufreq_driver->suspend
1669 && cpufreq_driver->suspend(policy))
1670 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1674 cpufreq_suspended = true;
1678 * cpufreq_resume() - Resume CPUFreq governors
* Called during system-wide Suspend/Hibernate cycles for resuming governors that
1681 * are suspended with cpufreq_suspend().
1683 void cpufreq_resume(void)
1685 struct cpufreq_policy *policy;
1687 if (!cpufreq_driver)
1693 pr_debug("%s: Resuming Governors\n", __func__);
1695 cpufreq_suspended = false;
1697 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1698 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1699 pr_err("%s: Failed to resume driver: %p\n", __func__,
1701 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1702 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1703 pr_err("%s: Failed to start governor for policy: %p\n",
* Schedule a call to cpufreq_update_policy() for the boot CPU, i.e. the
* last policy in the list. It will verify that the current frequency is
* in sync with what we believe it to be.
1711 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1712 schedule_work(&policy->update);
1717 * cpufreq_get_current_driver - return current driver's name
* Return the name string of the currently loaded cpufreq driver, or NULL
* if none.
1722 const char *cpufreq_get_current_driver(void)
1725 return cpufreq_driver->name;
1729 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1731 /*********************************************************************
1732 * NOTIFIER LISTS INTERFACE *
1733 *********************************************************************/
1736 * cpufreq_register_notifier - register a driver with cpufreq
1737 * @nb: notifier function to register
1738 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1740 * Add a driver to one of two lists: either a list of drivers that
1741 * are notified about clock rate changes (once before and once after
1742 * the transition), or a list of drivers that are notified about
1743 * changes in cpufreq policy.
1745 * This function may sleep, and has the same return conditions as
1746 * blocking_notifier_chain_register.
1748 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1752 if (cpufreq_disabled())
1755 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1758 case CPUFREQ_TRANSITION_NOTIFIER:
1759 ret = srcu_notifier_chain_register(
1760 &cpufreq_transition_notifier_list, nb);
1762 case CPUFREQ_POLICY_NOTIFIER:
1763 ret = blocking_notifier_chain_register(
1764 &cpufreq_policy_notifier_list, nb);
1772 EXPORT_SYMBOL(cpufreq_register_notifier);
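/*
 * Illustrative sketch (not part of the original file): a client of the
 * transition list. The callback is invoked once with CPUFREQ_PRECHANGE and
 * once with CPUFREQ_POSTCHANGE per transition; foo_rescale() is a
 * hypothetical consumer of the new frequency.
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			foo_rescale(freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */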
1775 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1776 * @nb: notifier block to be unregistered
1777 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1779 * Remove a driver from the CPU frequency notifier list.
1781 * This function may sleep, and has the same return conditions as
1782 * blocking_notifier_chain_unregister.
1784 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1788 if (cpufreq_disabled())
1792 case CPUFREQ_TRANSITION_NOTIFIER:
1793 ret = srcu_notifier_chain_unregister(
1794 &cpufreq_transition_notifier_list, nb);
1796 case CPUFREQ_POLICY_NOTIFIER:
1797 ret = blocking_notifier_chain_unregister(
1798 &cpufreq_policy_notifier_list, nb);
1806 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1809 /*********************************************************************
1811 *********************************************************************/
1813 /* Must set freqs->new to intermediate frequency */
1814 static int __target_intermediate(struct cpufreq_policy *policy,
1815 struct cpufreq_freqs *freqs, int index)
1819 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1821 /* We don't need to switch to intermediate freq */
1825 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1826 __func__, policy->cpu, freqs->old, freqs->new);
1828 cpufreq_freq_transition_begin(policy, freqs);
1829 ret = cpufreq_driver->target_intermediate(policy, index);
1830 cpufreq_freq_transition_end(policy, freqs, ret);
1833 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1839 static int __target_index(struct cpufreq_policy *policy,
1840 struct cpufreq_frequency_table *freq_table, int index)
1842 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1843 unsigned int intermediate_freq = 0;
1844 int retval = -EINVAL;
1847 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1849 /* Handle switching to intermediate frequency */
1850 if (cpufreq_driver->get_intermediate) {
1851 retval = __target_intermediate(policy, &freqs, index);
1855 intermediate_freq = freqs.new;
1856 /* Set old freq to intermediate */
1857 if (intermediate_freq)
1858 freqs.old = freqs.new;
1861 freqs.new = freq_table[index].frequency;
1862 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1863 __func__, policy->cpu, freqs.old, freqs.new);
1865 cpufreq_freq_transition_begin(policy, &freqs);
1868 retval = cpufreq_driver->target_index(policy, index);
1870 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1874 cpufreq_freq_transition_end(policy, &freqs, retval);
* Failed after setting to intermediate freq? The driver should have
* reverted to the initial frequency and so should we. Check
* here for intermediate_freq instead of get_intermediate, in
* case we haven't switched to the intermediate freq at all.
1882 if (unlikely(retval && intermediate_freq)) {
1883 freqs.old = intermediate_freq;
1884 freqs.new = policy->restore_freq;
1885 cpufreq_freq_transition_begin(policy, &freqs);
1886 cpufreq_freq_transition_end(policy, &freqs, 0);
1893 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1894 unsigned int target_freq,
1895 unsigned int relation)
1897 unsigned int old_target_freq = target_freq;
1898 int retval = -EINVAL;
1900 if (cpufreq_disabled())
1903 /* Make sure that target_freq is within supported range */
1904 if (target_freq > policy->max)
1905 target_freq = policy->max;
1906 if (target_freq < policy->min)
1907 target_freq = policy->min;
1909 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1910 policy->cpu, target_freq, relation, old_target_freq);
* This might look like a redundant call, as we are checking it again
* after finding the index. But it is left intentionally for cases where
* exactly the same frequency is requested again, so we can save a few
* function calls.
1918 if (target_freq == policy->cur)
1921 /* Save last value to restore later on errors */
1922 policy->restore_freq = policy->cur;
1924 if (cpufreq_driver->target)
1925 retval = cpufreq_driver->target(policy, target_freq, relation);
1926 else if (cpufreq_driver->target_index) {
1927 struct cpufreq_frequency_table *freq_table;
1930 freq_table = cpufreq_frequency_get_table(policy->cpu);
1931 if (unlikely(!freq_table)) {
1932 pr_err("%s: Unable to find freq_table\n", __func__);
1936 retval = cpufreq_frequency_table_target(policy, freq_table,
1937 target_freq, relation, &index);
1938 if (unlikely(retval)) {
1939 pr_err("%s: Unable to find matching freq\n", __func__);
1943 if (freq_table[index].frequency == policy->cur) {
1948 retval = __target_index(policy, freq_table, index);
1954 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1956 int cpufreq_driver_target(struct cpufreq_policy *policy,
1957 unsigned int target_freq,
1958 unsigned int relation)
1962 down_write(&policy->rwsem);
1964 ret = __cpufreq_driver_target(policy, target_freq, relation);
1966 up_write(&policy->rwsem);
1970 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1973 * when "event" is CPUFREQ_GOV_LIMITS
1976 static int __cpufreq_governor(struct cpufreq_policy *policy,
/* Must only be defined when the default governor is known to have latency
restrictions, e.g. conservative or ondemand.
That this is the case is already ensured in Kconfig.
1985 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1986 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1988 struct cpufreq_governor *gov = NULL;
1991 /* Don't start any governor operations if we are entering suspend */
1992 if (cpufreq_suspended)
1995 if (policy->governor->max_transition_latency &&
1996 policy->cpuinfo.transition_latency >
1997 policy->governor->max_transition_latency) {
2001 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2002 policy->governor->name, gov->name);
2003 policy->governor = gov;
2007 if (event == CPUFREQ_GOV_POLICY_INIT)
2008 if (!try_module_get(policy->governor->owner))
2011 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2012 policy->cpu, event);
2014 mutex_lock(&cpufreq_governor_lock);
2015 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2016 || (!policy->governor_enabled
2017 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2018 mutex_unlock(&cpufreq_governor_lock);
2022 if (event == CPUFREQ_GOV_STOP)
2023 policy->governor_enabled = false;
2024 else if (event == CPUFREQ_GOV_START)
2025 policy->governor_enabled = true;
2027 mutex_unlock(&cpufreq_governor_lock);
2029 ret = policy->governor->governor(policy, event);
2032 if (event == CPUFREQ_GOV_POLICY_INIT)
2033 policy->governor->initialized++;
2034 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2035 policy->governor->initialized--;
2037 /* Restore original values */
2038 mutex_lock(&cpufreq_governor_lock);
2039 if (event == CPUFREQ_GOV_STOP)
2040 policy->governor_enabled = true;
2041 else if (event == CPUFREQ_GOV_START)
2042 policy->governor_enabled = false;
2043 mutex_unlock(&cpufreq_governor_lock);
2046 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2047 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2048 module_put(policy->governor->owner);
2053 int cpufreq_register_governor(struct cpufreq_governor *governor)
2060 if (cpufreq_disabled())
2063 mutex_lock(&cpufreq_governor_mutex);
2065 governor->initialized = 0;
2067 if (__find_governor(governor->name) == NULL) {
2069 list_add(&governor->governor_list, &cpufreq_governor_list);
2072 mutex_unlock(&cpufreq_governor_mutex);
2075 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
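/*
 * Illustrative sketch (not part of the original file): the minimal shape of
 * a governor module built on this API, in the style of the performance
 * governor. All "foo" names are hypothetical.
 *
 *	static int cpufreq_governor_foo(struct cpufreq_policy *policy,
 *					unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor cpufreq_gov_foo = {
 *		.name		= "foo",
 *		.governor	= cpufreq_governor_foo,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// paired in the module's init/exit:
 *	cpufreq_register_governor(&cpufreq_gov_foo);
 *	cpufreq_unregister_governor(&cpufreq_gov_foo);
 */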
2077 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2084 if (cpufreq_disabled())
2087 for_each_present_cpu(cpu) {
2088 if (cpu_online(cpu))
2090 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2091 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2094 mutex_lock(&cpufreq_governor_mutex);
2095 list_del(&governor->governor_list);
2096 mutex_unlock(&cpufreq_governor_mutex);
2099 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2102 /*********************************************************************
2103 * POLICY INTERFACE *
2104 *********************************************************************/
2107 * cpufreq_get_policy - get the current cpufreq_policy
* @policy: struct cpufreq_policy into which the current cpufreq_policy
* is written
2111 * Reads the current cpufreq policy.
2113 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2115 struct cpufreq_policy *cpu_policy;
2119 cpu_policy = cpufreq_cpu_get(cpu);
2123 memcpy(policy, cpu_policy, sizeof(*policy));
2125 cpufreq_cpu_put(cpu_policy);
2128 EXPORT_SYMBOL(cpufreq_get_policy);
2131 * policy : current policy.
2132 * new_policy: policy to be set.
2134 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2135 struct cpufreq_policy *new_policy)
2137 struct cpufreq_governor *old_gov;
2140 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2141 new_policy->cpu, new_policy->min, new_policy->max);
2143 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2145 if (new_policy->min > policy->max || new_policy->max < policy->min)
2148 /* verify the cpu speed can be set within this limit */
2149 ret = cpufreq_driver->verify(new_policy);
2153 /* adjust if necessary - all reasons */
2154 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2155 CPUFREQ_ADJUST, new_policy);
2157 /* adjust if necessary - hardware incompatibility*/
2158 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2159 CPUFREQ_INCOMPATIBLE, new_policy);
2162 * verify the cpu speed can be set within this limit, which might be
* different from the first one
2165 ret = cpufreq_driver->verify(new_policy);
2169 /* notification of the new policy */
2170 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2171 CPUFREQ_NOTIFY, new_policy);
2173 policy->min = new_policy->min;
2174 policy->max = new_policy->max;
2176 pr_debug("new min and max freqs are %u - %u kHz\n",
2177 policy->min, policy->max);
2179 if (cpufreq_driver->setpolicy) {
2180 policy->policy = new_policy->policy;
2181 pr_debug("setting range\n");
2182 return cpufreq_driver->setpolicy(new_policy);
2185 if (new_policy->governor == policy->governor)
2188 pr_debug("governor switch\n");
2190 /* save old, working values */
2191 old_gov = policy->governor;
2192 /* end old governor */
2194 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2195 up_write(&policy->rwsem);
2196 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2197 down_write(&policy->rwsem);
2200 /* start new governor */
2201 policy->governor = new_policy->governor;
2202 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2203 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2206 up_write(&policy->rwsem);
2207 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2208 down_write(&policy->rwsem);
2211 /* new governor failed, so re-start old one */
2212 pr_debug("starting governor %s failed\n", policy->governor->name);
2214 policy->governor = old_gov;
2215 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2216 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2222 pr_debug("governor: change or update limits\n");
2223 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2227 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2228 * @cpu: CPU which shall be re-evaluated
* Useful for policy notifiers which have different requirements
* at different times.
2233 int cpufreq_update_policy(unsigned int cpu)
2235 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2236 struct cpufreq_policy new_policy;
2242 down_write(&policy->rwsem);
2244 pr_debug("updating policy for CPU %u\n", cpu);
2245 memcpy(&new_policy, policy, sizeof(*policy));
2246 new_policy.min = policy->user_policy.min;
2247 new_policy.max = policy->user_policy.max;
2248 new_policy.policy = policy->user_policy.policy;
2249 new_policy.governor = policy->user_policy.governor;
2252 * BIOS might change freq behind our back
2253 * -> ask driver for current freq and notify governors about a change
2255 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2256 new_policy.cur = cpufreq_driver->get(cpu);
2257 if (WARN_ON(!new_policy.cur)) {
2263 pr_debug("Driver did not initialize current freq\n");
2264 policy->cur = new_policy.cur;
2266 if (policy->cur != new_policy.cur && has_target())
2267 cpufreq_out_of_sync(cpu, policy->cur,
2272 ret = cpufreq_set_policy(policy, &new_policy);
2275 up_write(&policy->rwsem);
2277 cpufreq_cpu_put(policy);
2280 EXPORT_SYMBOL(cpufreq_update_policy);
2282 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2283 unsigned long action, void *hcpu)
2285 unsigned int cpu = (unsigned long)hcpu;
2288 dev = get_cpu_device(cpu);
2290 switch (action & ~CPU_TASKS_FROZEN) {
2292 __cpufreq_add_dev(dev, NULL);
2295 case CPU_DOWN_PREPARE:
2296 __cpufreq_remove_dev_prepare(dev, NULL);
2300 __cpufreq_remove_dev_finish(dev, NULL);
2303 case CPU_DOWN_FAILED:
2304 __cpufreq_add_dev(dev, NULL);
2311 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2312 .notifier_call = cpufreq_cpu_callback,
2315 /*********************************************************************
2317 *********************************************************************/
2318 static int cpufreq_boost_set_sw(int state)
2320 struct cpufreq_frequency_table *freq_table;
2321 struct cpufreq_policy *policy;
2324 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2325 freq_table = cpufreq_frequency_get_table(policy->cpu);
2327 ret = cpufreq_frequency_table_cpuinfo(policy,
2330 pr_err("%s: Policy frequency update failed\n",
2334 policy->user_policy.max = policy->max;
2335 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2342 int cpufreq_boost_trigger_state(int state)
2344 unsigned long flags;
2347 if (cpufreq_driver->boost_enabled == state)
2350 write_lock_irqsave(&cpufreq_driver_lock, flags);
2351 cpufreq_driver->boost_enabled = state;
2352 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2354 ret = cpufreq_driver->set_boost(state);
2356 write_lock_irqsave(&cpufreq_driver_lock, flags);
2357 cpufreq_driver->boost_enabled = !state;
2358 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2360 pr_err("%s: Cannot %s BOOST\n",
2361 __func__, state ? "enable" : "disable");
2367 int cpufreq_boost_supported(void)
2369 if (likely(cpufreq_driver))
2370 return cpufreq_driver->boost_supported;
2374 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2376 int cpufreq_boost_enabled(void)
2378 return cpufreq_driver->boost_enabled;
2380 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2382 /*********************************************************************
2383 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2384 *********************************************************************/
2387 * cpufreq_register_driver - register a CPU Frequency driver
* @driver_data: A struct cpufreq_driver containing the values
2389 * submitted by the CPU Frequency driver.
2391 * Registers a CPU Frequency driver to this core code. This code
2392 * returns zero on success, -EBUSY when another driver got here first
2393 * (and isn't unregistered in the meantime).
2396 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2398 unsigned long flags;
2401 if (cpufreq_disabled())
2404 if (!driver_data || !driver_data->verify || !driver_data->init ||
2405 !(driver_data->setpolicy || driver_data->target_index ||
2406 driver_data->target) ||
2407 (driver_data->setpolicy && (driver_data->target_index ||
2408 driver_data->target)) ||
2409 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2412 pr_debug("trying to register driver %s\n", driver_data->name);
2414 if (driver_data->setpolicy)
2415 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2417 write_lock_irqsave(&cpufreq_driver_lock, flags);
2418 if (cpufreq_driver) {
2419 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2422 cpufreq_driver = driver_data;
2423 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2425 if (cpufreq_boost_supported()) {
2427 * Check if driver provides function to enable boost -
2428 * if not, use cpufreq_boost_set_sw as default
2430 if (!cpufreq_driver->set_boost)
2431 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2433 ret = cpufreq_sysfs_create_file(&boost.attr);
2435 pr_err("%s: cannot register global BOOST sysfs file\n",
2437 goto err_null_driver;
2441 ret = subsys_interface_register(&cpufreq_interface);
2443 goto err_boost_unreg;
2445 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2449 /* check for at least one working CPU */
2450 for (i = 0; i < nr_cpu_ids; i++)
2451 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2456 /* if all ->init() calls failed, unregister */
2458 pr_debug("no CPU initialized for driver %s\n",
2464 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2465 pr_debug("driver %s up and running\n", driver_data->name);
2469 subsys_interface_unregister(&cpufreq_interface);
2471 if (cpufreq_boost_supported())
2472 cpufreq_sysfs_remove_file(&boost.attr);
2474 write_lock_irqsave(&cpufreq_driver_lock, flags);
2475 cpufreq_driver = NULL;
2476 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2479 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
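/*
 * Illustrative sketch (not part of the original file): the minimal callback
 * set that passes the validation in cpufreq_register_driver() - ->init(),
 * ->verify() and exactly one of ->setpolicy() or ->target()/->target_index().
 * All "foo" names are hypothetical; the generic helpers are the ones defined
 * above and in freq_table.c.
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo",
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init foo_cpufreq_module_init(void)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 */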
2482 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2484 * Unregister the current CPUFreq driver. Only call this if you have
2485 * the right to do so, i.e. if you have succeeded in initialising before!
2486 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2487 * currently not initialised.
2489 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2491 unsigned long flags;
2493 if (!cpufreq_driver || (driver != cpufreq_driver))
2496 pr_debug("unregistering driver %s\n", driver->name);
2498 subsys_interface_unregister(&cpufreq_interface);
2499 if (cpufreq_boost_supported())
2500 cpufreq_sysfs_remove_file(&boost.attr);
2502 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2504 down_write(&cpufreq_rwsem);
2505 write_lock_irqsave(&cpufreq_driver_lock, flags);
2507 cpufreq_driver = NULL;
2509 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2510 up_write(&cpufreq_rwsem);
2514 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2516 static int __init cpufreq_core_init(void)
2518 if (cpufreq_disabled())
2521 cpufreq_global_kobject = kobject_create();
2522 BUG_ON(!cpufreq_global_kobject);
2526 core_initcall(cpufreq_core_init);