2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its rwlock. This lock
36 * also protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 /* Flag to suspend/resume CPUFreq governors */
49 static bool cpufreq_suspended;
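/*
 * A cpufreq driver implements either ->target()/->target_index(), where
 * governors select frequencies, or ->setpolicy(), where the hardware or
 * firmware selects frequencies within the policy limits. has_target()
 * below checks for the former kind of driver.
 */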
51 static inline bool has_target(void)
53 return cpufreq_driver->target_index || cpufreq_driver->target;
57 * rwsem to guarantee that the cpufreq driver module doesn't unload during a critical section.
60 static DECLARE_RWSEM(cpufreq_rwsem);
62 /* internal prototypes */
63 static int __cpufreq_governor(struct cpufreq_policy *policy,
65 static unsigned int __cpufreq_get(unsigned int cpu);
66 static void handle_update(struct work_struct *work);
69 * Two notifier lists: the "policy" list is involved in the
70 * validation process for a new CPU frequency policy; the
71 * "transition" list for kernel code that needs to handle
72 * changes to devices when the CPU clock speed changes.
73 * The mutex locks both lists.
75 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
76 static struct srcu_notifier_head cpufreq_transition_notifier_list;
78 static bool init_cpufreq_transition_notifier_list_called;
79 static int __init init_cpufreq_transition_notifier_list(void)
81 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
82 init_cpufreq_transition_notifier_list_called = true;
85 pure_initcall(init_cpufreq_transition_notifier_list);
87 static int off __read_mostly;
88 static int cpufreq_disabled(void)
92 void disable_cpufreq(void)
96 static LIST_HEAD(cpufreq_governor_list);
97 static DEFINE_MUTEX(cpufreq_governor_mutex);
99 bool have_governor_per_policy(void)
101 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
103 EXPORT_SYMBOL_GPL(have_governor_per_policy);
105 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
107 if (have_governor_per_policy())
108 return &policy->kobj;
110 return cpufreq_global_kobject;
112 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
114 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
120 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
122 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
129 idle_time = cur_wall_time - busy_time;
131 *wall = cputime_to_usecs(cur_wall_time);
133 return cputime_to_usecs(idle_time);
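/*
 * get_cpu_idle_time() below prefers the NO_HZ idle accounting
 * (get_cpu_idle_time_us()); when that accounting is unavailable it returns
 * -1ULL and we fall back to the jiffy-based estimate above. With io_busy
 * unset, iowait time is also counted as idle time.
 */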
136 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
138 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
140 if (idle_time == -1ULL)
141 return get_cpu_idle_time_jiffy(cpu, wall);
143 idle_time += get_cpu_iowait_time_us(cpu, wall);
147 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
150 * This is a generic cpufreq init() routine which can be used by cpufreq
151 * drivers of SMP systems. It will do the following:
152 * - validate & show the freq table passed
153 * - set the policy's transition latency
154 * - fill policy->cpus with all possible CPUs
156 int cpufreq_generic_init(struct cpufreq_policy *policy,
157 struct cpufreq_frequency_table *table,
158 unsigned int transition_latency)
162 ret = cpufreq_table_validate_and_show(policy, table);
164 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
168 policy->cpuinfo.transition_latency = transition_latency;
171 * The driver only supports the SMP configuration where all processors
172 * share the clock and voltage.
174 cpumask_setall(policy->cpus);
178 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
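/*
 * A minimal, hypothetical ->init() callback built on cpufreq_generic_init();
 * "my_freq_table" and the 300 us transition latency are illustrative values,
 * not part of this file:
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, my_freq_table, 300 * 1000);
 *	}
 */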
180 unsigned int cpufreq_generic_get(unsigned int cpu)
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
184 if (!policy || IS_ERR(policy->clk)) {
185 pr_err("%s: No %s associated to cpu: %d\n",
186 __func__, policy ? "clk" : "policy", cpu);
190 return clk_get_rate(policy->clk) / 1000;
192 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
194 /* Only for cpufreq core internal use */
195 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
197 return per_cpu(cpufreq_cpu_data, cpu);
200 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
202 struct cpufreq_policy *policy = NULL;
205 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
208 if (!down_read_trylock(&cpufreq_rwsem))
211 /* get the cpufreq driver */
212 read_lock_irqsave(&cpufreq_driver_lock, flags);
214 if (cpufreq_driver) {
216 policy = per_cpu(cpufreq_cpu_data, cpu);
218 kobject_get(&policy->kobj);
221 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
224 up_read(&cpufreq_rwsem);
228 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
230 void cpufreq_cpu_put(struct cpufreq_policy *policy)
232 if (cpufreq_disabled())
235 kobject_put(&policy->kobj);
236 up_read(&cpufreq_rwsem);
238 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
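/*
 * cpufreq_cpu_get() takes a reference on the policy kobject (and holds
 * cpufreq_rwsem for reading); every successful call must therefore be
 * balanced with cpufreq_cpu_put(). A usage sketch, where use() is only a
 * placeholder:
 *
 *	policy = cpufreq_cpu_get(cpu);
 *	if (policy) {
 *		use(policy);
 *		cpufreq_cpu_put(policy);
 *	}
 */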
240 /*********************************************************************
241 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
242 *********************************************************************/
245 * adjust_jiffies - adjust the system "loops_per_jiffy"
247 * This function alters the system "loops_per_jiffy" for the clock
248 * speed change. Note that loops_per_jiffy cannot be updated on SMP
249 * systems as each CPU might be scaled differently. So, use the arch
250 * per-CPU loops_per_jiffy value wherever possible.
253 static unsigned long l_p_j_ref;
254 static unsigned int l_p_j_ref_freq;
256 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
258 if (ci->flags & CPUFREQ_CONST_LOOPS)
261 if (!l_p_j_ref_freq) {
262 l_p_j_ref = loops_per_jiffy;
263 l_p_j_ref_freq = ci->old;
264 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
265 l_p_j_ref, l_p_j_ref_freq);
267 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
268 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
269 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
271 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
272 loops_per_jiffy, ci->new);
276 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
282 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
283 struct cpufreq_freqs *freqs, unsigned int state)
285 BUG_ON(irqs_disabled());
287 if (cpufreq_disabled())
290 freqs->flags = cpufreq_driver->flags;
291 pr_debug("notification %u of frequency transition to %u kHz\n",
296 case CPUFREQ_PRECHANGE:
297 /* detect if the driver reported a value as "old frequency"
298 * which is not equal to what the cpufreq core thinks is "old frequency".
301 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
302 if ((policy) && (policy->cpu == freqs->cpu) &&
303 (policy->cur) && (policy->cur != freqs->old)) {
304 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
305 freqs->old, policy->cur);
306 freqs->old = policy->cur;
309 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
310 CPUFREQ_PRECHANGE, freqs);
311 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
314 case CPUFREQ_POSTCHANGE:
315 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
316 pr_debug("FREQ: %lu - CPU: %lu\n",
317 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
318 trace_cpu_frequency(freqs->new, freqs->cpu);
319 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
320 CPUFREQ_POSTCHANGE, freqs);
321 if (likely(policy) && likely(policy->cpu == freqs->cpu))
322 policy->cur = freqs->new;
328 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
329 * on frequency transition.
331 * This function calls the transition notifiers and the "adjust_jiffies"
332 * function. It is called twice on all CPU frequency changes that have external effects.
335 void cpufreq_notify_transition(struct cpufreq_policy *policy,
336 struct cpufreq_freqs *freqs, unsigned int state)
338 for_each_cpu(freqs->cpu, policy->cpus)
339 __cpufreq_notify_transition(policy, freqs, state);
341 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
343 /* Do post notifications when there is a chance that the transition has failed */
344 void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
345 struct cpufreq_freqs *freqs, int transition_failed)
347 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
348 if (!transition_failed)
351 swap(freqs->old, freqs->new);
352 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
353 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
355 EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
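/*
 * Drivers implementing ->target_index() (see __cpufreq_driver_target())
 * bracket a frequency change with a CPUFREQ_PRECHANGE notification before
 * touching the hardware and cpufreq_notify_post_transition() afterwards;
 * passing a non-zero transition_failed makes the helper above swap old/new
 * and replay the notifications so listeners see the frequency restored.
 */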
358 /*********************************************************************
359 * SYSFS INTERFACE *
360 *********************************************************************/
361 static ssize_t show_boost(struct kobject *kobj,
362 struct attribute *attr, char *buf)
364 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
367 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
368 const char *buf, size_t count)
372 ret = sscanf(buf, "%d", &enable);
373 if (ret != 1 || enable < 0 || enable > 1)
376 if (cpufreq_boost_trigger_state(enable)) {
377 pr_err("%s: Cannot %s BOOST!\n",
378 __func__, enable ? "enable" : "disable");
382 pr_debug("%s: cpufreq BOOST %s\n",
383 __func__, enable ? "enabled" : "disabled");
387 define_one_global_rw(boost);
389 static struct cpufreq_governor *__find_governor(const char *str_governor)
391 struct cpufreq_governor *t;
393 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
394 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
401 * cpufreq_parse_governor - parse a governor string
403 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
404 struct cpufreq_governor **governor)
411 if (cpufreq_driver->setpolicy) {
412 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
413 *policy = CPUFREQ_POLICY_PERFORMANCE;
415 } else if (!strnicmp(str_governor, "powersave",
417 *policy = CPUFREQ_POLICY_POWERSAVE;
420 } else if (has_target()) {
421 struct cpufreq_governor *t;
423 mutex_lock(&cpufreq_governor_mutex);
425 t = __find_governor(str_governor);
430 mutex_unlock(&cpufreq_governor_mutex);
431 ret = request_module("cpufreq_%s", str_governor);
432 mutex_lock(&cpufreq_governor_mutex);
435 t = __find_governor(str_governor);
443 mutex_unlock(&cpufreq_governor_mutex);
450 * cpufreq_per_cpu_attr_read() / show_##file_name() -
451 * print out cpufreq information
453 * Write out information from cpufreq_driver->policy[cpu]; object must be "unsigned int".
457 #define show_one(file_name, object) \
458 static ssize_t show_##file_name \
459 (struct cpufreq_policy *policy, char *buf) \
461 return sprintf(buf, "%u\n", policy->object); \
464 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
465 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
466 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
467 show_one(scaling_min_freq, min);
468 show_one(scaling_max_freq, max);
469 show_one(scaling_cur_freq, cur);
471 static int cpufreq_set_policy(struct cpufreq_policy *policy,
472 struct cpufreq_policy *new_policy);
475 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
477 #define store_one(file_name, object) \
478 static ssize_t store_##file_name \
479 (struct cpufreq_policy *policy, const char *buf, size_t count) \
482 struct cpufreq_policy new_policy; \
484 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
488 ret = sscanf(buf, "%u", &new_policy.object); \
492 ret = cpufreq_set_policy(policy, &new_policy); \
493 policy->user_policy.object = policy->object; \
495 return ret ? ret : count; \
498 store_one(scaling_min_freq, min);
499 store_one(scaling_max_freq, max);
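/*
 * The show_one()/store_one() macros above expand to per-attribute sysfs
 * handlers; for example, store_one(scaling_max_freq, max) generates
 * store_scaling_max_freq(), which parses the user value into
 * new_policy.max and applies it through cpufreq_set_policy().
 */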
502 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
504 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
507 unsigned int cur_freq = __cpufreq_get(policy->cpu);
508 if (!cur_freq)
509 return sprintf(buf, "<unknown>");
510 return sprintf(buf, "%u\n", cur_freq);
514 * show_scaling_governor - show the current policy for the specified CPU
516 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
518 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
519 return sprintf(buf, "powersave\n");
520 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
521 return sprintf(buf, "performance\n");
522 else if (policy->governor)
523 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
524 policy->governor->name);
529 * store_scaling_governor - store policy for the specified CPU
531 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
532 const char *buf, size_t count)
535 char str_governor[16];
536 struct cpufreq_policy new_policy;
538 ret = cpufreq_get_policy(&new_policy, policy->cpu);
542 ret = sscanf(buf, "%15s", str_governor);
546 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
547 &new_policy.governor))
550 ret = cpufreq_set_policy(policy, &new_policy);
552 policy->user_policy.policy = policy->policy;
553 policy->user_policy.governor = policy->governor;
562 * show_scaling_driver - show the cpufreq driver currently loaded
564 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
566 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
570 * show_scaling_available_governors - show the available CPUfreq governors
572 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
576 struct cpufreq_governor *t;
579 i += sprintf(buf, "performance powersave");
583 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
584 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
585 - (CPUFREQ_NAME_LEN + 2)))
587 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
590 i += sprintf(&buf[i], "\n");
594 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
599 for_each_cpu(cpu, mask) {
601 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
602 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
603 if (i >= (PAGE_SIZE - 5))
606 i += sprintf(&buf[i], "\n");
609 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
612 * show_related_cpus - show the CPUs affected by each transition even if
613 * hw coordination is in use
615 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
617 return cpufreq_show_cpus(policy->related_cpus, buf);
621 * show_affected_cpus - show the CPUs affected by each transition
623 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
625 return cpufreq_show_cpus(policy->cpus, buf);
628 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
629 const char *buf, size_t count)
631 unsigned int freq = 0;
634 if (!policy->governor || !policy->governor->store_setspeed)
637 ret = sscanf(buf, "%u", &freq);
641 policy->governor->store_setspeed(policy, freq);
646 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
648 if (!policy->governor || !policy->governor->show_setspeed)
649 return sprintf(buf, "<unsupported>\n");
651 return policy->governor->show_setspeed(policy, buf);
655 * show_bios_limit - show the current cpufreq HW/BIOS limitation
657 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
661 if (cpufreq_driver->bios_limit) {
662 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
664 return sprintf(buf, "%u\n", limit);
666 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
669 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
670 cpufreq_freq_attr_ro(cpuinfo_min_freq);
671 cpufreq_freq_attr_ro(cpuinfo_max_freq);
672 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
673 cpufreq_freq_attr_ro(scaling_available_governors);
674 cpufreq_freq_attr_ro(scaling_driver);
675 cpufreq_freq_attr_ro(scaling_cur_freq);
676 cpufreq_freq_attr_ro(bios_limit);
677 cpufreq_freq_attr_ro(related_cpus);
678 cpufreq_freq_attr_ro(affected_cpus);
679 cpufreq_freq_attr_rw(scaling_min_freq);
680 cpufreq_freq_attr_rw(scaling_max_freq);
681 cpufreq_freq_attr_rw(scaling_governor);
682 cpufreq_freq_attr_rw(scaling_setspeed);
684 static struct attribute *default_attrs[] = {
685 &cpuinfo_min_freq.attr,
686 &cpuinfo_max_freq.attr,
687 &cpuinfo_transition_latency.attr,
688 &scaling_min_freq.attr,
689 &scaling_max_freq.attr,
692 &scaling_governor.attr,
693 &scaling_driver.attr,
694 &scaling_available_governors.attr,
695 &scaling_setspeed.attr,
699 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
700 #define to_attr(a) container_of(a, struct freq_attr, attr)
702 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
704 struct cpufreq_policy *policy = to_policy(kobj);
705 struct freq_attr *fattr = to_attr(attr);
708 if (!down_read_trylock(&cpufreq_rwsem))
711 down_read(&policy->rwsem);
714 ret = fattr->show(policy, buf);
718 up_read(&policy->rwsem);
719 up_read(&cpufreq_rwsem);
724 static ssize_t store(struct kobject *kobj, struct attribute *attr,
725 const char *buf, size_t count)
727 struct cpufreq_policy *policy = to_policy(kobj);
728 struct freq_attr *fattr = to_attr(attr);
729 ssize_t ret = -EINVAL;
733 if (!cpu_online(policy->cpu))
736 if (!down_read_trylock(&cpufreq_rwsem))
739 down_write(&policy->rwsem);
742 ret = fattr->store(policy, buf, count);
746 up_write(&policy->rwsem);
748 up_read(&cpufreq_rwsem);
755 static void cpufreq_sysfs_release(struct kobject *kobj)
757 struct cpufreq_policy *policy = to_policy(kobj);
758 pr_debug("last reference is dropped\n");
759 complete(&policy->kobj_unregister);
762 static const struct sysfs_ops sysfs_ops = {
767 static struct kobj_type ktype_cpufreq = {
768 .sysfs_ops = &sysfs_ops,
769 .default_attrs = default_attrs,
770 .release = cpufreq_sysfs_release,
773 struct kobject *cpufreq_global_kobject;
774 EXPORT_SYMBOL(cpufreq_global_kobject);
776 static int cpufreq_global_kobject_usage;
778 int cpufreq_get_global_kobject(void)
780 if (!cpufreq_global_kobject_usage++)
781 return kobject_add(cpufreq_global_kobject,
782 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
786 EXPORT_SYMBOL(cpufreq_get_global_kobject);
788 void cpufreq_put_global_kobject(void)
790 if (!--cpufreq_global_kobject_usage)
791 kobject_del(cpufreq_global_kobject);
793 EXPORT_SYMBOL(cpufreq_put_global_kobject);
795 int cpufreq_sysfs_create_file(const struct attribute *attr)
797 int ret = cpufreq_get_global_kobject();
800 ret = sysfs_create_file(cpufreq_global_kobject, attr);
802 cpufreq_put_global_kobject();
807 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
809 void cpufreq_sysfs_remove_file(const struct attribute *attr)
811 sysfs_remove_file(cpufreq_global_kobject, attr);
812 cpufreq_put_global_kobject();
814 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
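/*
 * cpufreq_global_kobject is usage counted: cpufreq_get_global_kobject()
 * adds /sys/devices/system/cpu/cpufreq on the first call and
 * cpufreq_put_global_kobject() deletes it on the last put, so the
 * create/remove file helpers above can be used without worrying about
 * whether the directory already exists.
 */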
816 /* symlink affected CPUs */
817 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
822 for_each_cpu(j, policy->cpus) {
823 struct device *cpu_dev;
825 if (j == policy->cpu)
828 pr_debug("Adding link for CPU: %u\n", j);
829 cpu_dev = get_cpu_device(j);
830 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
838 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
841 struct freq_attr **drv_attr;
844 /* prepare interface data */
845 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
846 &dev->kobj, "cpufreq");
850 /* set up files for this cpu device */
851 drv_attr = cpufreq_driver->attr;
852 while ((drv_attr) && (*drv_attr)) {
853 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
855 goto err_out_kobj_put;
858 if (cpufreq_driver->get) {
859 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
861 goto err_out_kobj_put;
864 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
866 goto err_out_kobj_put;
868 if (cpufreq_driver->bios_limit) {
869 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
871 goto err_out_kobj_put;
874 ret = cpufreq_add_dev_symlink(policy);
876 goto err_out_kobj_put;
881 kobject_put(&policy->kobj);
882 wait_for_completion(&policy->kobj_unregister);
886 static void cpufreq_init_policy(struct cpufreq_policy *policy)
888 struct cpufreq_governor *gov = NULL;
889 struct cpufreq_policy new_policy;
892 memcpy(&new_policy, policy, sizeof(*policy));
894 /* Update governor of new_policy to the governor used before hotplug */
895 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
897 pr_debug("Restoring governor %s for cpu %d\n",
898 policy->governor->name, policy->cpu);
900 gov = CPUFREQ_DEFAULT_GOVERNOR;
902 new_policy.governor = gov;
904 /* Use the default policy if it's valid. */
905 if (cpufreq_driver->setpolicy)
906 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
908 /* set default policy */
909 ret = cpufreq_set_policy(policy, &new_policy);
911 pr_debug("setting policy failed\n");
912 if (cpufreq_driver->exit)
913 cpufreq_driver->exit(policy);
917 #ifdef CONFIG_HOTPLUG_CPU
918 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
919 unsigned int cpu, struct device *dev)
925 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
927 pr_err("%s: Failed to stop governor\n", __func__);
932 down_write(&policy->rwsem);
934 write_lock_irqsave(&cpufreq_driver_lock, flags);
936 cpumask_set_cpu(cpu, policy->cpus);
937 per_cpu(cpufreq_cpu_data, cpu) = policy;
938 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
940 up_write(&policy->rwsem);
943 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
944 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
945 pr_err("%s: Failed to start governor\n", __func__);
950 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
954 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
956 struct cpufreq_policy *policy;
959 read_lock_irqsave(&cpufreq_driver_lock, flags);
961 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
963 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
965 policy->governor = NULL;
970 static struct cpufreq_policy *cpufreq_policy_alloc(void)
972 struct cpufreq_policy *policy;
974 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
978 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
979 goto err_free_policy;
981 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
982 goto err_free_cpumask;
984 INIT_LIST_HEAD(&policy->policy_list);
985 init_rwsem(&policy->rwsem);
990 free_cpumask_var(policy->cpus);
997 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
999 struct kobject *kobj;
1000 struct completion *cmp;
1002 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1003 CPUFREQ_REMOVE_POLICY, policy);
1005 down_read(&policy->rwsem);
1006 kobj = &policy->kobj;
1007 cmp = &policy->kobj_unregister;
1008 up_read(&policy->rwsem);
1012 * We need to make sure that the underlying kobj is
1013 * actually not referenced anymore by anybody before we
1014 * proceed with unloading.
1016 pr_debug("waiting for dropping of refcount\n");
1017 wait_for_completion(cmp);
1018 pr_debug("wait complete\n");
1021 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1023 free_cpumask_var(policy->related_cpus);
1024 free_cpumask_var(policy->cpus);
1028 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1030 if (WARN_ON(cpu == policy->cpu))
1033 down_write(&policy->rwsem);
1035 policy->last_cpu = policy->cpu;
1038 up_write(&policy->rwsem);
1040 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1041 CPUFREQ_UPDATE_POLICY_CPU, policy);
1044 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1046 unsigned int j, cpu = dev->id;
1048 struct cpufreq_policy *policy;
1049 unsigned long flags;
1050 bool recover_policy = cpufreq_suspended;
1051 #ifdef CONFIG_HOTPLUG_CPU
1052 struct cpufreq_policy *tpolicy;
1055 if (cpu_is_offline(cpu))
1058 pr_debug("adding CPU %u\n", cpu);
1061 /* check whether a different CPU already registered this
1062 * CPU because it is in the same boat. */
1063 policy = cpufreq_cpu_get(cpu);
1064 if (unlikely(policy)) {
1065 cpufreq_cpu_put(policy);
1070 if (!down_read_trylock(&cpufreq_rwsem))
1073 #ifdef CONFIG_HOTPLUG_CPU
1074 /* Check if this cpu was hot-unplugged earlier and has siblings */
1075 read_lock_irqsave(&cpufreq_driver_lock, flags);
1076 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1077 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1078 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1079 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1080 up_read(&cpufreq_rwsem);
1084 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1088 * Restore the saved policy when doing light-weight init and fall back
1089 * to the full init if that fails.
1091 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1093 recover_policy = false;
1094 policy = cpufreq_policy_alloc();
1100 * In the resume path, since we restore a saved policy, the assignment
1101 * to policy->cpu is like an update of the existing policy, rather than
1102 * the creation of a brand new one. So we need to perform this update
1103 * by invoking update_policy_cpu().
1105 if (recover_policy && cpu != policy->cpu)
1106 update_policy_cpu(policy, cpu);
1110 cpumask_copy(policy->cpus, cpumask_of(cpu));
1112 init_completion(&policy->kobj_unregister);
1113 INIT_WORK(&policy->update, handle_update);
1115 /* call driver. From then on the cpufreq must be able
1116 * to accept all calls to ->verify and ->setpolicy for this CPU
1118 ret = cpufreq_driver->init(policy);
1120 pr_debug("initialization failed\n");
1121 goto err_set_policy_cpu;
1124 /* related cpus should at least include policy->cpus */
1125 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1128 * affected cpus must always be the ones that are online. We aren't
1129 * managing offline cpus here.
1131 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1133 if (!recover_policy) {
1134 policy->user_policy.min = policy->min;
1135 policy->user_policy.max = policy->max;
1138 down_write(&policy->rwsem);
1139 write_lock_irqsave(&cpufreq_driver_lock, flags);
1140 for_each_cpu(j, policy->cpus)
1141 per_cpu(cpufreq_cpu_data, j) = policy;
1142 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1144 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1145 policy->cur = cpufreq_driver->get(policy->cpu);
1147 pr_err("%s: ->get() failed\n", __func__);
1153 * Sometimes boot loaders set the CPU frequency to a value outside of the
1154 * frequency table present with the cpufreq core. In such cases the CPU might
1155 * be unstable if it has to run at that frequency for a long duration of time,
1156 * so it's better to set it to a frequency which is specified in the
1157 * freq-table. This also makes cpufreq stats inconsistent, as
1158 * cpufreq-stats would fail to register because the current frequency of the
1159 * CPU isn't found in the freq-table.
1161 * Because we don't want this change to affect the boot process badly, we go
1162 * for the next freq which is >= policy->cur ('cur' must be set by now,
1163 * otherwise we will end up setting freq to the lowest of the table, as 'cur'
1164 * is initialized to zero).
1166 * We are passing target-freq as "policy->cur - 1" otherwise
1167 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1168 * equal to target-freq.
1170 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1172 /* Are we running at unknown frequency ? */
1173 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1174 if (ret == -EINVAL) {
1175 /* Warn user and fix it */
1176 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1177 __func__, policy->cpu, policy->cur);
1178 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1179 CPUFREQ_RELATION_L);
1182 * Reaching here after boot in a few seconds may not
1183 * mean that the system will remain stable at this "unknown"
1184 * frequency for a longer duration. Hence, a BUG_ON().
1187 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1188 __func__, policy->cpu, policy->cur);
1192 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1193 CPUFREQ_START, policy);
1195 if (!recover_policy) {
1196 ret = cpufreq_add_dev_interface(policy, dev);
1198 goto err_out_unregister;
1199 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1200 CPUFREQ_CREATE_POLICY, policy);
1203 write_lock_irqsave(&cpufreq_driver_lock, flags);
1204 list_add(&policy->policy_list, &cpufreq_policy_list);
1205 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1207 cpufreq_init_policy(policy);
1209 if (!recover_policy) {
1210 policy->user_policy.policy = policy->policy;
1211 policy->user_policy.governor = policy->governor;
1213 up_write(&policy->rwsem);
1215 kobject_uevent(&policy->kobj, KOBJ_ADD);
1216 up_read(&cpufreq_rwsem);
1218 pr_debug("initialization complete\n");
1224 write_lock_irqsave(&cpufreq_driver_lock, flags);
1225 for_each_cpu(j, policy->cpus)
1226 per_cpu(cpufreq_cpu_data, j) = NULL;
1227 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1229 if (cpufreq_driver->exit)
1230 cpufreq_driver->exit(policy);
1232 if (recover_policy) {
1233 /* Do not leave stale fallback data behind. */
1234 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1235 cpufreq_policy_put_kobj(policy);
1237 cpufreq_policy_free(policy);
1240 up_read(&cpufreq_rwsem);
1246 * cpufreq_add_dev - add a CPU device
1248 * Adds the cpufreq interface for a CPU device.
1250 * The Oracle says: try running cpufreq registration/unregistration concurrently
1251 * with cpu hotplugging and all hell will break loose. Tried to clean this
1252 * mess up, but more thorough testing is needed. - Mathieu
1254 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1256 return __cpufreq_add_dev(dev, sif);
1259 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1260 unsigned int old_cpu)
1262 struct device *cpu_dev;
1265 /* first sibling now owns the new sysfs dir */
1266 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1268 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1269 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1271 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1273 down_write(&policy->rwsem);
1274 cpumask_set_cpu(old_cpu, policy->cpus);
1275 up_write(&policy->rwsem);
1277 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1286 static int __cpufreq_remove_dev_prepare(struct device *dev,
1287 struct subsys_interface *sif)
1289 unsigned int cpu = dev->id, cpus;
1291 unsigned long flags;
1292 struct cpufreq_policy *policy;
1294 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1296 write_lock_irqsave(&cpufreq_driver_lock, flags);
1298 policy = per_cpu(cpufreq_cpu_data, cpu);
1300 /* Save the policy somewhere when doing a light-weight tear-down */
1301 if (cpufreq_suspended)
1302 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1304 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1307 pr_debug("%s: No cpu_data found\n", __func__);
1312 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1314 pr_err("%s: Failed to stop governor\n", __func__);
1319 if (!cpufreq_driver->setpolicy)
1320 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1321 policy->governor->name, CPUFREQ_NAME_LEN);
1323 down_read(&policy->rwsem);
1324 cpus = cpumask_weight(policy->cpus);
1325 up_read(&policy->rwsem);
1327 if (cpu != policy->cpu) {
1328 sysfs_remove_link(&dev->kobj, "cpufreq");
1329 } else if (cpus > 1) {
1330 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1332 update_policy_cpu(policy, new_cpu);
1334 if (!cpufreq_suspended) {
1335 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1336 __func__, new_cpu, cpu);
1344 static int __cpufreq_remove_dev_finish(struct device *dev,
1345 struct subsys_interface *sif)
1347 unsigned int cpu = dev->id, cpus;
1349 unsigned long flags;
1350 struct cpufreq_policy *policy;
1352 read_lock_irqsave(&cpufreq_driver_lock, flags);
1353 policy = per_cpu(cpufreq_cpu_data, cpu);
1354 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1357 pr_debug("%s: No cpu_data found\n", __func__);
1361 down_write(&policy->rwsem);
1362 cpus = cpumask_weight(policy->cpus);
1365 cpumask_clear_cpu(cpu, policy->cpus);
1366 up_write(&policy->rwsem);
1368 /* If cpu is last user of policy, free policy */
1371 ret = __cpufreq_governor(policy,
1372 CPUFREQ_GOV_POLICY_EXIT);
1374 pr_err("%s: Failed to exit governor\n",
1380 if (!cpufreq_suspended)
1381 cpufreq_policy_put_kobj(policy);
1384 * Perform the ->exit() even during light-weight tear-down,
1385 * since this is a core component, and is essential for the
1386 * subsequent light-weight ->init() to succeed.
1388 if (cpufreq_driver->exit)
1389 cpufreq_driver->exit(policy);
1391 /* Remove policy from list of active policies */
1392 write_lock_irqsave(&cpufreq_driver_lock, flags);
1393 list_del(&policy->policy_list);
1394 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1396 if (!cpufreq_suspended)
1397 cpufreq_policy_free(policy);
1400 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1401 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1402 pr_err("%s: Failed to start governor\n",
1409 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1414 * cpufreq_remove_dev - remove a CPU device
1416 * Removes the cpufreq interface for a CPU device.
1418 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1420 unsigned int cpu = dev->id;
1423 if (cpu_is_offline(cpu))
1426 ret = __cpufreq_remove_dev_prepare(dev, sif);
1429 ret = __cpufreq_remove_dev_finish(dev, sif);
1434 static void handle_update(struct work_struct *work)
1436 struct cpufreq_policy *policy =
1437 container_of(work, struct cpufreq_policy, update);
1438 unsigned int cpu = policy->cpu;
1439 pr_debug("handle_update for cpu %u called\n", cpu);
1440 cpufreq_update_policy(cpu);
1444 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1447 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1448 * @new_freq: CPU frequency the CPU actually runs at
1450 * We adjust to the current frequency first, and need to clean up later.
1451 * So either call cpufreq_update_policy() or schedule handle_update().
1453 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1454 unsigned int new_freq)
1456 struct cpufreq_policy *policy;
1457 struct cpufreq_freqs freqs;
1458 unsigned long flags;
1460 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1461 old_freq, new_freq);
1463 freqs.old = old_freq;
1464 freqs.new = new_freq;
1466 read_lock_irqsave(&cpufreq_driver_lock, flags);
1467 policy = per_cpu(cpufreq_cpu_data, cpu);
1468 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1470 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1471 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1475 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1478 * This is the last known freq, without actually getting it from the driver.
1479 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1481 unsigned int cpufreq_quick_get(unsigned int cpu)
1483 struct cpufreq_policy *policy;
1484 unsigned int ret_freq = 0;
1486 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1487 return cpufreq_driver->get(cpu);
1489 policy = cpufreq_cpu_get(cpu);
1491 ret_freq = policy->cur;
1492 cpufreq_cpu_put(policy);
1497 EXPORT_SYMBOL(cpufreq_quick_get);
1500 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1503 * Just return the max possible frequency for a given CPU.
1505 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1507 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1508 unsigned int ret_freq = 0;
1511 ret_freq = policy->max;
1512 cpufreq_cpu_put(policy);
1517 EXPORT_SYMBOL(cpufreq_quick_get_max);
1519 static unsigned int __cpufreq_get(unsigned int cpu)
1521 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1522 unsigned int ret_freq = 0;
1524 if (!cpufreq_driver->get)
1527 ret_freq = cpufreq_driver->get(cpu);
1529 if (ret_freq && policy->cur &&
1530 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1531 /* verify no discrepancy between actual and
1532 saved value exists */
1533 if (unlikely(ret_freq != policy->cur)) {
1534 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1535 schedule_work(&policy->update);
1543 * cpufreq_get - get the current CPU frequency (in kHz)
1546 * Get the current (static) frequency of the CPU.
1548 unsigned int cpufreq_get(unsigned int cpu)
1550 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1551 unsigned int ret_freq = 0;
1554 down_read(&policy->rwsem);
1555 ret_freq = __cpufreq_get(cpu);
1556 up_read(&policy->rwsem);
1558 cpufreq_cpu_put(policy);
1563 EXPORT_SYMBOL(cpufreq_get);
1565 static struct subsys_interface cpufreq_interface = {
1567 .subsys = &cpu_subsys,
1568 .add_dev = cpufreq_add_dev,
1569 .remove_dev = cpufreq_remove_dev,
1573 * In case the platform wants some specific frequency to be configured during suspend.
1576 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1580 if (!policy->suspend_freq) {
1581 pr_err("%s: suspend_freq can't be zero\n", __func__);
1585 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1586 policy->suspend_freq);
1588 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1589 CPUFREQ_RELATION_H);
1591 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1592 __func__, policy->suspend_freq, ret);
1596 EXPORT_SYMBOL(cpufreq_generic_suspend);
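/*
 * Usage sketch (not a requirement imposed by the core): a driver opting
 * into this generic suspend path sets policy->suspend_freq (e.g. in its
 * ->init() callback) and points its ->suspend() hook at
 * cpufreq_generic_suspend(); cpufreq_suspend() then invokes it and the
 * helper forces that frequency with CPUFREQ_RELATION_H.
 */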
1599 * cpufreq_suspend() - Suspend CPUFreq governors
1601 * Called during system-wide Suspend/Hibernate cycles for suspending governors,
1602 * as some platforms can't change frequency after this point in the suspend
1603 * cycle. This is because some of the devices (e.g. i2c, regulators) used for
1604 * changing the frequency are suspended quickly after this point.
1606 void cpufreq_suspend(void)
1608 struct cpufreq_policy *policy;
1610 if (!cpufreq_driver)
1616 pr_debug("%s: Suspending Governors\n", __func__);
1618 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1619 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1620 pr_err("%s: Failed to stop governor for policy: %p\n",
1622 else if (cpufreq_driver->suspend
1623 && cpufreq_driver->suspend(policy))
1624 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1628 cpufreq_suspended = true;
1632 * cpufreq_resume() - Resume CPUFreq governors
1634 * Called during system-wide Suspend/Hibernate cycles for resuming governors that
1635 * were suspended with cpufreq_suspend().
1637 void cpufreq_resume(void)
1639 struct cpufreq_policy *policy;
1641 if (!cpufreq_driver)
1647 pr_debug("%s: Resuming Governors\n", __func__);
1649 cpufreq_suspended = false;
1651 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1652 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1653 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1654 pr_err("%s: Failed to start governor for policy: %p\n",
1656 else if (cpufreq_driver->resume
1657 && cpufreq_driver->resume(policy))
1658 pr_err("%s: Failed to resume driver: %p\n", __func__,
1662 * schedule a call to cpufreq_update_policy() for the boot CPU, i.e. the last
1663 * policy in the list. It will verify that the current freq is in
1664 * sync with what we believe it to be.
1666 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1667 schedule_work(&policy->update);
1672 * cpufreq_get_current_driver - return current driver's name
1674 * Return the name string of the currently loaded cpufreq driver, or NULL if none.
1677 const char *cpufreq_get_current_driver(void)
1680 return cpufreq_driver->name;
1684 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1686 /*********************************************************************
1687 * NOTIFIER LISTS INTERFACE *
1688 *********************************************************************/
1691 * cpufreq_register_notifier - register a driver with cpufreq
1692 * @nb: notifier function to register
1693 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1695 * Add a driver to one of two lists: either a list of drivers that
1696 * are notified about clock rate changes (once before and once after
1697 * the transition), or a list of drivers that are notified about
1698 * changes in cpufreq policy.
1700 * This function may sleep, and has the same return conditions as
1701 * blocking_notifier_chain_register.
1703 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1707 if (cpufreq_disabled())
1710 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1713 case CPUFREQ_TRANSITION_NOTIFIER:
1714 ret = srcu_notifier_chain_register(
1715 &cpufreq_transition_notifier_list, nb);
1717 case CPUFREQ_POLICY_NOTIFIER:
1718 ret = blocking_notifier_chain_register(
1719 &cpufreq_policy_notifier_list, nb);
1727 EXPORT_SYMBOL(cpufreq_register_notifier);
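/*
 * A hypothetical transition-notifier registration sketch (all "my_*" names
 * are illustrative only):
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_transition_cb };
 *	...
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */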
1730 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1731 * @nb: notifier block to be unregistered
1732 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1734 * Remove a driver from the CPU frequency notifier list.
1736 * This function may sleep, and has the same return conditions as
1737 * blocking_notifier_chain_unregister.
1739 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1743 if (cpufreq_disabled())
1747 case CPUFREQ_TRANSITION_NOTIFIER:
1748 ret = srcu_notifier_chain_unregister(
1749 &cpufreq_transition_notifier_list, nb);
1751 case CPUFREQ_POLICY_NOTIFIER:
1752 ret = blocking_notifier_chain_unregister(
1753 &cpufreq_policy_notifier_list, nb);
1761 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1764 /*********************************************************************
1765 * GOVERNORS *
1766 *********************************************************************/
1768 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1769 unsigned int target_freq,
1770 unsigned int relation)
1772 int retval = -EINVAL;
1773 unsigned int old_target_freq = target_freq;
1775 if (cpufreq_disabled())
1778 /* Make sure that target_freq is within supported range */
1779 if (target_freq > policy->max)
1780 target_freq = policy->max;
1781 if (target_freq < policy->min)
1782 target_freq = policy->min;
1784 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1785 policy->cpu, target_freq, relation, old_target_freq);
1788 * This might look like a redundant call, as we are checking it again
1789 * after finding the index. But it is left intentionally for cases where
1790 * exactly the same freq is requested again, so that we can save a few function calls.
1793 if (target_freq == policy->cur)
1796 if (cpufreq_driver->target)
1797 retval = cpufreq_driver->target(policy, target_freq, relation);
1798 else if (cpufreq_driver->target_index) {
1799 struct cpufreq_frequency_table *freq_table;
1800 struct cpufreq_freqs freqs;
1804 freq_table = cpufreq_frequency_get_table(policy->cpu);
1805 if (unlikely(!freq_table)) {
1806 pr_err("%s: Unable to find freq_table\n", __func__);
1810 retval = cpufreq_frequency_table_target(policy, freq_table,
1811 target_freq, relation, &index);
1812 if (unlikely(retval)) {
1813 pr_err("%s: Unable to find matching freq\n", __func__);
1817 if (freq_table[index].frequency == policy->cur) {
1822 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1825 freqs.old = policy->cur;
1826 freqs.new = freq_table[index].frequency;
1829 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1830 __func__, policy->cpu, freqs.old, freqs.new);
1832 cpufreq_notify_transition(policy, &freqs,
1836 retval = cpufreq_driver->target_index(policy, index);
1838 pr_err("%s: Failed to change cpu frequency: %d\n",
1842 cpufreq_notify_post_transition(policy, &freqs, retval);
1848 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1850 int cpufreq_driver_target(struct cpufreq_policy *policy,
1851 unsigned int target_freq,
1852 unsigned int relation)
1856 down_write(&policy->rwsem);
1858 ret = __cpufreq_driver_target(policy, target_freq, relation);
1860 up_write(&policy->rwsem);
1864 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
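/*
 * Locking note: cpufreq_driver_target() takes policy->rwsem for writing
 * around the call, while __cpufreq_driver_target() leaves that locking to
 * its caller (typically a governor).
 */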
1867 * when "event" is CPUFREQ_GOV_LIMITS
1870 static int __cpufreq_governor(struct cpufreq_policy *policy,
1875 /* Must only be defined when the default governor is known to have latency
1876 restrictions, e.g. conservative or ondemand.
1877 That this is the case is already ensured in Kconfig
1879 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1880 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1882 struct cpufreq_governor *gov = NULL;
1885 /* Don't start any governor operations if we are entering suspend */
1886 if (cpufreq_suspended)
1889 if (policy->governor->max_transition_latency &&
1890 policy->cpuinfo.transition_latency >
1891 policy->governor->max_transition_latency) {
1895 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1896 policy->governor->name, gov->name);
1897 policy->governor = gov;
1901 if (event == CPUFREQ_GOV_POLICY_INIT)
1902 if (!try_module_get(policy->governor->owner))
1905 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1906 policy->cpu, event);
1908 mutex_lock(&cpufreq_governor_lock);
1909 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1910 || (!policy->governor_enabled
1911 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1912 mutex_unlock(&cpufreq_governor_lock);
1916 if (event == CPUFREQ_GOV_STOP)
1917 policy->governor_enabled = false;
1918 else if (event == CPUFREQ_GOV_START)
1919 policy->governor_enabled = true;
1921 mutex_unlock(&cpufreq_governor_lock);
1923 ret = policy->governor->governor(policy, event);
1926 if (event == CPUFREQ_GOV_POLICY_INIT)
1927 policy->governor->initialized++;
1928 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1929 policy->governor->initialized--;
1931 /* Restore original values */
1932 mutex_lock(&cpufreq_governor_lock);
1933 if (event == CPUFREQ_GOV_STOP)
1934 policy->governor_enabled = true;
1935 else if (event == CPUFREQ_GOV_START)
1936 policy->governor_enabled = false;
1937 mutex_unlock(&cpufreq_governor_lock);
1940 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1941 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1942 module_put(policy->governor->owner);
1947 int cpufreq_register_governor(struct cpufreq_governor *governor)
1954 if (cpufreq_disabled())
1957 mutex_lock(&cpufreq_governor_mutex);
1959 governor->initialized = 0;
1961 if (__find_governor(governor->name) == NULL) {
1963 list_add(&governor->governor_list, &cpufreq_governor_list);
1966 mutex_unlock(&cpufreq_governor_mutex);
1969 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1971 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1978 if (cpufreq_disabled())
1981 for_each_present_cpu(cpu) {
1982 if (cpu_online(cpu))
1984 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1985 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1988 mutex_lock(&cpufreq_governor_mutex);
1989 list_del(&governor->governor_list);
1990 mutex_unlock(&cpufreq_governor_mutex);
1993 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1996 /*********************************************************************
1997 * POLICY INTERFACE *
1998 *********************************************************************/
2001 * cpufreq_get_policy - get the current cpufreq_policy
2002 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
2005 * Reads the current cpufreq policy.
2007 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2009 struct cpufreq_policy *cpu_policy;
2013 cpu_policy = cpufreq_cpu_get(cpu);
2017 memcpy(policy, cpu_policy, sizeof(*policy));
2019 cpufreq_cpu_put(cpu_policy);
2022 EXPORT_SYMBOL(cpufreq_get_policy);
2025 * policy : current policy.
2026 * new_policy: policy to be set.
2028 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2029 struct cpufreq_policy *new_policy)
2031 struct cpufreq_governor *old_gov;
2034 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2035 new_policy->cpu, new_policy->min, new_policy->max);
2037 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2039 if (new_policy->min > policy->max || new_policy->max < policy->min)
2042 /* verify the cpu speed can be set within this limit */
2043 ret = cpufreq_driver->verify(new_policy);
2047 /* adjust if necessary - all reasons */
2048 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2049 CPUFREQ_ADJUST, new_policy);
2051 /* adjust if necessary - hardware incompatibility*/
2052 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2053 CPUFREQ_INCOMPATIBLE, new_policy);
2056 * verify the cpu speed can be set within this limit, which might be
2057 * different from the first one
2059 ret = cpufreq_driver->verify(new_policy);
2063 /* notification of the new policy */
2064 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2065 CPUFREQ_NOTIFY, new_policy);
2067 policy->min = new_policy->min;
2068 policy->max = new_policy->max;
2070 pr_debug("new min and max freqs are %u - %u kHz\n",
2071 policy->min, policy->max);
2073 if (cpufreq_driver->setpolicy) {
2074 policy->policy = new_policy->policy;
2075 pr_debug("setting range\n");
2076 return cpufreq_driver->setpolicy(new_policy);
2079 if (new_policy->governor == policy->governor)
2082 pr_debug("governor switch\n");
2084 /* save old, working values */
2085 old_gov = policy->governor;
2086 /* end old governor */
2088 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2089 up_write(&policy->rwsem);
2090 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2091 down_write(&policy->rwsem);
2094 /* start new governor */
2095 policy->governor = new_policy->governor;
2096 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2097 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2100 up_write(&policy->rwsem);
2101 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2102 down_write(&policy->rwsem);
2105 /* new governor failed, so re-start old one */
2106 pr_debug("starting governor %s failed\n", policy->governor->name);
2108 policy->governor = old_gov;
2109 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2110 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2116 pr_debug("governor: change or update limits\n");
2117 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
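/*
 * Summary of the governor switch performed above: the old governor is
 * stopped (CPUFREQ_GOV_STOP) and torn down (CPUFREQ_GOV_POLICY_EXIT), then
 * the new one is initialized (CPUFREQ_GOV_POLICY_INIT) and started
 * (CPUFREQ_GOV_START); if that fails, the old governor is restored with the
 * same INIT/START sequence.
 */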
2121 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2122 * @cpu: CPU which shall be re-evaluated
2124 * Useful for policy notifiers which have different necessities
2125 * at different times.
2127 int cpufreq_update_policy(unsigned int cpu)
2129 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2130 struct cpufreq_policy new_policy;
2138 down_write(&policy->rwsem);
2140 pr_debug("updating policy for CPU %u\n", cpu);
2141 memcpy(&new_policy, policy, sizeof(*policy));
2142 new_policy.min = policy->user_policy.min;
2143 new_policy.max = policy->user_policy.max;
2144 new_policy.policy = policy->user_policy.policy;
2145 new_policy.governor = policy->user_policy.governor;
2148 * BIOS might change freq behind our back
2149 * -> ask driver for current freq and notify governors about a change
2151 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2152 new_policy.cur = cpufreq_driver->get(cpu);
2153 if (WARN_ON(!new_policy.cur)) {
2159 pr_debug("Driver did not initialize current freq\n");
2160 policy->cur = new_policy.cur;
2162 if (policy->cur != new_policy.cur && has_target())
2163 cpufreq_out_of_sync(cpu, policy->cur,
2168 ret = cpufreq_set_policy(policy, &new_policy);
2170 up_write(&policy->rwsem);
2172 cpufreq_cpu_put(policy);
2176 EXPORT_SYMBOL(cpufreq_update_policy);
2178 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2179 unsigned long action, void *hcpu)
2181 unsigned int cpu = (unsigned long)hcpu;
2184 dev = get_cpu_device(cpu);
2186 switch (action & ~CPU_TASKS_FROZEN) {
2188 __cpufreq_add_dev(dev, NULL);
2191 case CPU_DOWN_PREPARE:
2192 __cpufreq_remove_dev_prepare(dev, NULL);
2196 __cpufreq_remove_dev_finish(dev, NULL);
2199 case CPU_DOWN_FAILED:
2200 __cpufreq_add_dev(dev, NULL);
2207 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2208 .notifier_call = cpufreq_cpu_callback,
2211 /*********************************************************************
2212 * BOOST *
2213 *********************************************************************/
2214 static int cpufreq_boost_set_sw(int state)
2216 struct cpufreq_frequency_table *freq_table;
2217 struct cpufreq_policy *policy;
2220 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2221 freq_table = cpufreq_frequency_get_table(policy->cpu);
2223 ret = cpufreq_frequency_table_cpuinfo(policy,
2226 pr_err("%s: Policy frequency update failed\n",
2230 policy->user_policy.max = policy->max;
2231 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2238 int cpufreq_boost_trigger_state(int state)
2240 unsigned long flags;
2243 if (cpufreq_driver->boost_enabled == state)
2246 write_lock_irqsave(&cpufreq_driver_lock, flags);
2247 cpufreq_driver->boost_enabled = state;
2248 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2250 ret = cpufreq_driver->set_boost(state);
2252 write_lock_irqsave(&cpufreq_driver_lock, flags);
2253 cpufreq_driver->boost_enabled = !state;
2254 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2256 pr_err("%s: Cannot %s BOOST\n",
2257 __func__, state ? "enable" : "disable");
2263 int cpufreq_boost_supported(void)
2265 if (likely(cpufreq_driver))
2266 return cpufreq_driver->boost_supported;
2270 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2272 int cpufreq_boost_enabled(void)
2274 return cpufreq_driver->boost_enabled;
2276 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2278 /*********************************************************************
2279 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2280 *********************************************************************/
2283 * cpufreq_register_driver - register a CPU Frequency driver
2284 * @driver_data: A struct cpufreq_driver containing the values
2285 * submitted by the CPU Frequency driver.
2287 * Registers a CPU Frequency driver to this core code. This code
2288 * returns zero on success, -EBUSY when another driver got here first
2289 * (and isn't unregistered in the meantime).
2292 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2294 unsigned long flags;
2297 if (cpufreq_disabled())
2300 if (!driver_data || !driver_data->verify || !driver_data->init ||
2301 !(driver_data->setpolicy || driver_data->target_index ||
2302 driver_data->target) ||
2303 (driver_data->setpolicy && (driver_data->target_index ||
2304 driver_data->target)))
2307 pr_debug("trying to register driver %s\n", driver_data->name);
2309 if (driver_data->setpolicy)
2310 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2312 write_lock_irqsave(&cpufreq_driver_lock, flags);
2313 if (cpufreq_driver) {
2314 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2317 cpufreq_driver = driver_data;
2318 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2320 if (cpufreq_boost_supported()) {
2322 * Check if driver provides function to enable boost -
2323 * if not, use cpufreq_boost_set_sw as default
2325 if (!cpufreq_driver->set_boost)
2326 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2328 ret = cpufreq_sysfs_create_file(&boost.attr);
2330 pr_err("%s: cannot register global BOOST sysfs file\n",
2332 goto err_null_driver;
2336 ret = subsys_interface_register(&cpufreq_interface);
2338 goto err_boost_unreg;
2340 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2344 /* check for at least one working CPU */
2345 for (i = 0; i < nr_cpu_ids; i++)
2346 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2351 /* if all ->init() calls failed, unregister */
2353 pr_debug("no CPU initialized for driver %s\n",
2359 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2360 pr_debug("driver %s up and running\n", driver_data->name);
2364 subsys_interface_unregister(&cpufreq_interface);
2366 if (cpufreq_boost_supported())
2367 cpufreq_sysfs_remove_file(&boost.attr);
2369 write_lock_irqsave(&cpufreq_driver_lock, flags);
2370 cpufreq_driver = NULL;
2371 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2374 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
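/*
 * A minimal, hypothetical driver registration sketch (all "my_*" symbols
 * are illustrative and not defined in this file):
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name		= "my-cpufreq",
 *		.flags		= CPUFREQ_STICKY,
 *		.init		= my_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= my_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init my_cpufreq_register(void)
 *	{
 *		return cpufreq_register_driver(&my_cpufreq_driver);
 *	}
 *	module_init(my_cpufreq_register);
 */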
2377 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2379 * Unregister the current CPUFreq driver. Only call this if you have
2380 * the right to do so, i.e. if you have succeeded in initialising before!
2381 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2382 * currently not initialised.
2384 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2386 unsigned long flags;
2388 if (!cpufreq_driver || (driver != cpufreq_driver))
2391 pr_debug("unregistering driver %s\n", driver->name);
2393 subsys_interface_unregister(&cpufreq_interface);
2394 if (cpufreq_boost_supported())
2395 cpufreq_sysfs_remove_file(&boost.attr);
2397 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2399 down_write(&cpufreq_rwsem);
2400 write_lock_irqsave(&cpufreq_driver_lock, flags);
2402 cpufreq_driver = NULL;
2404 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2405 up_write(&cpufreq_rwsem);
2409 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2411 static int __init cpufreq_core_init(void)
2413 if (cpufreq_disabled())
2416 cpufreq_global_kobject = kobject_create();
2417 BUG_ON(!cpufreq_global_kobject);
2421 core_initcall(cpufreq_core_init);