2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 static DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
50 static inline bool has_target(void)
52 return cpufreq_driver->target_index || cpufreq_driver->target;
56 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
57 * all cpufreq/hotplug/workqueue/etc related lock issues.
59 * The rules for this semaphore:
60 * - Any routine that wants to read from the policy structure will
61 * do a down_read on this semaphore.
62 * - Any routine that will write to the policy structure and/or may take away
63 * the policy altogether (eg. CPU hotplug), will hold this lock in write
64 * mode before doing so.
67 * - Governor routines that can be called in cpufreq hotplug path should not
68 * take this sem as top level hotplug notifier handler takes this.
69 * - Lock should not be held across
70 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
72 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
74 #define lock_policy_rwsem(mode, cpu) \
75 static void lock_policy_rwsem_##mode(int cpu) \
77 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
79 down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
82 lock_policy_rwsem(read, cpu);
83 lock_policy_rwsem(write, cpu);
85 #define unlock_policy_rwsem(mode, cpu) \
86 static void unlock_policy_rwsem_##mode(int cpu) \
88 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
90 up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
93 unlock_policy_rwsem(read, cpu);
94 unlock_policy_rwsem(write, cpu);
97 * rwsem to guarantee that cpufreq driver module doesn't unload during critical sections
100 static DECLARE_RWSEM(cpufreq_rwsem);
102 /* internal prototypes */
103 static int __cpufreq_governor(struct cpufreq_policy *policy,
105 static unsigned int __cpufreq_get(unsigned int cpu);
106 static void handle_update(struct work_struct *work);
109 * Two notifier lists: the "policy" list is involved in the
110 * validation process for a new CPU frequency policy; the
111 * "transition" list for kernel code that needs to handle
112 * changes to devices when the CPU clock speed changes.
113 * The mutex locks both lists.
115 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
116 static struct srcu_notifier_head cpufreq_transition_notifier_list;
118 static bool init_cpufreq_transition_notifier_list_called;
119 static int __init init_cpufreq_transition_notifier_list(void)
121 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
122 init_cpufreq_transition_notifier_list_called = true;
125 pure_initcall(init_cpufreq_transition_notifier_list);
127 static int off __read_mostly;
128 static int cpufreq_disabled(void)
132 void disable_cpufreq(void)
136 static LIST_HEAD(cpufreq_governor_list);
137 static DEFINE_MUTEX(cpufreq_governor_mutex);
139 bool have_governor_per_policy(void)
141 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
143 EXPORT_SYMBOL_GPL(have_governor_per_policy);
145 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
147 if (have_governor_per_policy())
148 return &policy->kobj;
150 return cpufreq_global_kobject;
152 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
154 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
160 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
162 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
164 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
165 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
166 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
167 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
169 idle_time = cur_wall_time - busy_time;
171 *wall = cputime_to_usecs(cur_wall_time);
173 return cputime_to_usecs(idle_time);
176 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
178 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
180 if (idle_time == -1ULL)
181 return get_cpu_idle_time_jiffy(cpu, wall);
183 idle_time += get_cpu_iowait_time_us(cpu, wall);
187 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
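/*
 * Example (editor's sketch, not part of the original file): how a sampling
 * governor might turn two get_cpu_idle_time() readings into a load figure.
 * The helper name example_cpu_load() and its callers are hypothetical.
 */
static unsigned int example_cpu_load(unsigned int cpu, u64 *prev_idle,
				     u64 *prev_wall)
{
	u64 cur_wall, cur_idle, wall_diff, idle_diff;

	cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);

	wall_diff = cur_wall - *prev_wall;
	idle_diff = cur_idle - *prev_idle;

	*prev_wall = cur_wall;
	*prev_idle = cur_idle;

	if (!wall_diff)
		return 0;

	/* busy time as a percentage of the elapsed wall time */
	return div64_u64(100 * (wall_diff - idle_diff), wall_diff);
}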
190 * This is a generic cpufreq init() routine which can be used by cpufreq
191 * drivers of SMP systems. It will do the following:
192 * - validate & show the freq table passed
193 * - set the policy's transition latency
194 * - fill policy->cpus with all possible CPUs
196 int cpufreq_generic_init(struct cpufreq_policy *policy,
197 struct cpufreq_frequency_table *table,
198 unsigned int transition_latency)
202 ret = cpufreq_table_validate_and_show(policy, table);
204 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
208 policy->cpuinfo.transition_latency = transition_latency;
211 * The driver only supports SMP configurations where all processors
212 * share the clock and voltage.
214 cpumask_setall(policy->cpus);
218 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
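/*
 * Example (editor's sketch, not part of the original file): a minimal driver
 * ->init() callback built on cpufreq_generic_init().  The table contents and
 * the 100 us transition latency are made-up values.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0, .frequency = 200000 },	/* 200 MHz */
	{ .driver_data = 1, .frequency = 400000 },	/* 400 MHz */
	{ .driver_data = 2, .frequency = 800000 },	/* 800 MHz */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validates and publishes the table, sets latency and policy->cpus */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}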
220 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
222 struct cpufreq_policy *policy = NULL;
225 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
228 if (!down_read_trylock(&cpufreq_rwsem))
231 /* get the cpufreq driver */
232 read_lock_irqsave(&cpufreq_driver_lock, flags);
234 if (cpufreq_driver) {
236 policy = per_cpu(cpufreq_cpu_data, cpu);
238 kobject_get(&policy->kobj);
241 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
244 up_read(&cpufreq_rwsem);
248 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
250 void cpufreq_cpu_put(struct cpufreq_policy *policy)
252 if (cpufreq_disabled())
255 kobject_put(&policy->kobj);
256 up_read(&cpufreq_rwsem);
258 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
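/*
 * Example (editor's sketch): the usual refcounted access pattern.  Every
 * successful cpufreq_cpu_get() must be paired with cpufreq_cpu_put().
 * example_print_cur_freq() is a hypothetical caller.
 */
static void example_print_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	pr_info("cpu%u: last known frequency %u kHz\n", cpu, policy->cur);
	cpufreq_cpu_put(policy);
}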
260 /*********************************************************************
261 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
262 *********************************************************************/
265 * adjust_jiffies - adjust the system "loops_per_jiffy"
267 * This function alters the system "loops_per_jiffy" for the clock
268 * speed change. Note that loops_per_jiffy cannot be updated on SMP
269 * systems as each CPU might be scaled differently. So, use the arch
270 * per-CPU loops_per_jiffy value wherever possible.
273 static unsigned long l_p_j_ref;
274 static unsigned int l_p_j_ref_freq;
276 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
278 if (ci->flags & CPUFREQ_CONST_LOOPS)
281 if (!l_p_j_ref_freq) {
282 l_p_j_ref = loops_per_jiffy;
283 l_p_j_ref_freq = ci->old;
284 pr_debug("saving %lu as reference value for loops_per_jiffy; "
285 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
287 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
288 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
289 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
291 pr_debug("scaling loops_per_jiffy to %lu "
292 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
296 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
302 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
303 struct cpufreq_freqs *freqs, unsigned int state)
305 BUG_ON(irqs_disabled());
307 if (cpufreq_disabled())
310 freqs->flags = cpufreq_driver->flags;
311 pr_debug("notification %u of frequency transition to %u kHz\n",
316 case CPUFREQ_PRECHANGE:
317 /* detect if the driver reported a value as "old frequency"
318 * which is not equal to what the cpufreq core thinks is "old frequency".
321 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
322 if ((policy) && (policy->cpu == freqs->cpu) &&
323 (policy->cur) && (policy->cur != freqs->old)) {
324 pr_debug("Warning: CPU frequency is"
325 " %u, cpufreq assumed %u kHz.\n",
326 freqs->old, policy->cur);
327 freqs->old = policy->cur;
330 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
331 CPUFREQ_PRECHANGE, freqs);
332 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
335 case CPUFREQ_POSTCHANGE:
336 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
337 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
338 (unsigned long)freqs->cpu);
339 trace_cpu_frequency(freqs->new, freqs->cpu);
340 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
341 CPUFREQ_POSTCHANGE, freqs);
342 if (likely(policy) && likely(policy->cpu == freqs->cpu))
343 policy->cur = freqs->new;
349 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
350 * on frequency transition.
352 * This function calls the transition notifiers and the "adjust_jiffies"
353 * function. It is called twice on all CPU frequency changes that have
356 void cpufreq_notify_transition(struct cpufreq_policy *policy,
357 struct cpufreq_freqs *freqs, unsigned int state)
359 for_each_cpu(freqs->cpu, policy->cpus)
360 __cpufreq_notify_transition(policy, freqs, state);
362 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
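/*
 * Example (editor's sketch): how a ->target()-style driver typically brackets
 * the hardware reprogramming with the two notifications above.
 * example_hw_set_rate() is a hypothetical platform hook.
 */
static int example_set_target(struct cpufreq_policy *policy,
			      unsigned int new_freq)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = example_hw_set_rate(new_freq);	/* hypothetical */
	if (ret)
		freqs.new = freqs.old;		/* nothing actually changed */

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}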
365 /*********************************************************************
367 *********************************************************************/
369 static struct cpufreq_governor *__find_governor(const char *str_governor)
371 struct cpufreq_governor *t;
373 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
374 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
381 * cpufreq_parse_governor - parse a governor string
383 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
384 struct cpufreq_governor **governor)
391 if (cpufreq_driver->setpolicy) {
392 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
393 *policy = CPUFREQ_POLICY_PERFORMANCE;
395 } else if (!strnicmp(str_governor, "powersave",
397 *policy = CPUFREQ_POLICY_POWERSAVE;
400 } else if (has_target()) {
401 struct cpufreq_governor *t;
403 mutex_lock(&cpufreq_governor_mutex);
405 t = __find_governor(str_governor);
410 mutex_unlock(&cpufreq_governor_mutex);
411 ret = request_module("cpufreq_%s", str_governor);
412 mutex_lock(&cpufreq_governor_mutex);
415 t = __find_governor(str_governor);
423 mutex_unlock(&cpufreq_governor_mutex);
430 * cpufreq_per_cpu_attr_read() / show_##file_name() -
431 * print out cpufreq information
433 * Write out information from cpufreq_driver->policy[cpu]; object must be "unsigned int".
437 #define show_one(file_name, object) \
438 static ssize_t show_##file_name \
439 (struct cpufreq_policy *policy, char *buf) \
441 return sprintf(buf, "%u\n", policy->object); \
444 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
445 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
446 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
447 show_one(scaling_min_freq, min);
448 show_one(scaling_max_freq, max);
449 show_one(scaling_cur_freq, cur);
451 static int cpufreq_set_policy(struct cpufreq_policy *policy,
452 struct cpufreq_policy *new_policy);
455 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
457 #define store_one(file_name, object) \
458 static ssize_t store_##file_name \
459 (struct cpufreq_policy *policy, const char *buf, size_t count) \
462 struct cpufreq_policy new_policy; \
464 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
468 ret = sscanf(buf, "%u", &new_policy.object); \
472 ret = cpufreq_set_policy(policy, &new_policy); \
473 policy->user_policy.object = policy->object; \
475 return ret ? ret : count; \
478 store_one(scaling_min_freq, min);
479 store_one(scaling_max_freq, max);
482 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
484 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
487 unsigned int cur_freq = __cpufreq_get(policy->cpu);
489 return sprintf(buf, "<unknown>");
490 return sprintf(buf, "%u\n", cur_freq);
494 * show_scaling_governor - show the current policy for the specified CPU
496 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
498 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
499 return sprintf(buf, "powersave\n");
500 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
501 return sprintf(buf, "performance\n");
502 else if (policy->governor)
503 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
504 policy->governor->name);
509 * store_scaling_governor - store policy for the specified CPU
511 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
512 const char *buf, size_t count)
515 char str_governor[16];
516 struct cpufreq_policy new_policy;
518 ret = cpufreq_get_policy(&new_policy, policy->cpu);
522 ret = sscanf(buf, "%15s", str_governor);
526 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
527 &new_policy.governor))
530 ret = cpufreq_set_policy(policy, &new_policy);
532 policy->user_policy.policy = policy->policy;
533 policy->user_policy.governor = policy->governor;
542 * show_scaling_driver - show the cpufreq driver currently loaded
544 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
546 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
550 * show_scaling_available_governors - show the available CPUfreq governors
552 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
556 struct cpufreq_governor *t;
559 i += sprintf(buf, "performance powersave");
563 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
564 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
565 - (CPUFREQ_NAME_LEN + 2)))
567 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
570 i += sprintf(&buf[i], "\n");
574 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
579 for_each_cpu(cpu, mask) {
581 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
582 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
583 if (i >= (PAGE_SIZE - 5))
586 i += sprintf(&buf[i], "\n");
589 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
592 * show_related_cpus - show the CPUs affected by each transition even if
593 * hw coordination is in use
595 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
597 return cpufreq_show_cpus(policy->related_cpus, buf);
601 * show_affected_cpus - show the CPUs affected by each transition
603 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
605 return cpufreq_show_cpus(policy->cpus, buf);
608 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
609 const char *buf, size_t count)
611 unsigned int freq = 0;
614 if (!policy->governor || !policy->governor->store_setspeed)
617 ret = sscanf(buf, "%u", &freq);
621 policy->governor->store_setspeed(policy, freq);
626 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
628 if (!policy->governor || !policy->governor->show_setspeed)
629 return sprintf(buf, "<unsupported>\n");
631 return policy->governor->show_setspeed(policy, buf);
635 * show_bios_limit - show the current cpufreq HW/BIOS limitation
637 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
641 if (cpufreq_driver->bios_limit) {
642 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
644 return sprintf(buf, "%u\n", limit);
646 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
649 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
650 cpufreq_freq_attr_ro(cpuinfo_min_freq);
651 cpufreq_freq_attr_ro(cpuinfo_max_freq);
652 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
653 cpufreq_freq_attr_ro(scaling_available_governors);
654 cpufreq_freq_attr_ro(scaling_driver);
655 cpufreq_freq_attr_ro(scaling_cur_freq);
656 cpufreq_freq_attr_ro(bios_limit);
657 cpufreq_freq_attr_ro(related_cpus);
658 cpufreq_freq_attr_ro(affected_cpus);
659 cpufreq_freq_attr_rw(scaling_min_freq);
660 cpufreq_freq_attr_rw(scaling_max_freq);
661 cpufreq_freq_attr_rw(scaling_governor);
662 cpufreq_freq_attr_rw(scaling_setspeed);
664 static struct attribute *default_attrs[] = {
665 &cpuinfo_min_freq.attr,
666 &cpuinfo_max_freq.attr,
667 &cpuinfo_transition_latency.attr,
668 &scaling_min_freq.attr,
669 &scaling_max_freq.attr,
672 &scaling_governor.attr,
673 &scaling_driver.attr,
674 &scaling_available_governors.attr,
675 &scaling_setspeed.attr,
679 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
680 #define to_attr(a) container_of(a, struct freq_attr, attr)
682 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
684 struct cpufreq_policy *policy = to_policy(kobj);
685 struct freq_attr *fattr = to_attr(attr);
688 if (!down_read_trylock(&cpufreq_rwsem))
691 lock_policy_rwsem_read(policy->cpu);
694 ret = fattr->show(policy, buf);
698 unlock_policy_rwsem_read(policy->cpu);
699 up_read(&cpufreq_rwsem);
704 static ssize_t store(struct kobject *kobj, struct attribute *attr,
705 const char *buf, size_t count)
707 struct cpufreq_policy *policy = to_policy(kobj);
708 struct freq_attr *fattr = to_attr(attr);
709 ssize_t ret = -EINVAL;
713 if (!cpu_online(policy->cpu))
716 if (!down_read_trylock(&cpufreq_rwsem))
719 lock_policy_rwsem_write(policy->cpu);
722 ret = fattr->store(policy, buf, count);
726 unlock_policy_rwsem_write(policy->cpu);
728 up_read(&cpufreq_rwsem);
735 static void cpufreq_sysfs_release(struct kobject *kobj)
737 struct cpufreq_policy *policy = to_policy(kobj);
738 pr_debug("last reference is dropped\n");
739 complete(&policy->kobj_unregister);
742 static const struct sysfs_ops sysfs_ops = {
747 static struct kobj_type ktype_cpufreq = {
748 .sysfs_ops = &sysfs_ops,
749 .default_attrs = default_attrs,
750 .release = cpufreq_sysfs_release,
753 struct kobject *cpufreq_global_kobject;
754 EXPORT_SYMBOL(cpufreq_global_kobject);
756 static int cpufreq_global_kobject_usage;
758 int cpufreq_get_global_kobject(void)
760 if (!cpufreq_global_kobject_usage++)
761 return kobject_add(cpufreq_global_kobject,
762 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
766 EXPORT_SYMBOL(cpufreq_get_global_kobject);
768 void cpufreq_put_global_kobject(void)
770 if (!--cpufreq_global_kobject_usage)
771 kobject_del(cpufreq_global_kobject);
773 EXPORT_SYMBOL(cpufreq_put_global_kobject);
775 int cpufreq_sysfs_create_file(const struct attribute *attr)
777 int ret = cpufreq_get_global_kobject();
780 ret = sysfs_create_file(cpufreq_global_kobject, attr);
782 cpufreq_put_global_kobject();
787 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
789 void cpufreq_sysfs_remove_file(const struct attribute *attr)
791 sysfs_remove_file(cpufreq_global_kobject, attr);
792 cpufreq_put_global_kobject();
794 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
796 /* symlink affected CPUs */
797 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
802 for_each_cpu(j, policy->cpus) {
803 struct device *cpu_dev;
805 if (j == policy->cpu)
808 pr_debug("Adding link for CPU: %u\n", j);
809 cpu_dev = get_cpu_device(j);
810 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
818 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
821 struct freq_attr **drv_attr;
824 /* prepare interface data */
825 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
826 &dev->kobj, "cpufreq");
830 /* set up files for this cpu device */
831 drv_attr = cpufreq_driver->attr;
832 while ((drv_attr) && (*drv_attr)) {
833 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
835 goto err_out_kobj_put;
838 if (cpufreq_driver->get) {
839 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
841 goto err_out_kobj_put;
844 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
846 goto err_out_kobj_put;
848 if (cpufreq_driver->bios_limit) {
849 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
851 goto err_out_kobj_put;
854 ret = cpufreq_add_dev_symlink(policy);
856 goto err_out_kobj_put;
861 kobject_put(&policy->kobj);
862 wait_for_completion(&policy->kobj_unregister);
866 static void cpufreq_init_policy(struct cpufreq_policy *policy)
868 struct cpufreq_policy new_policy;
871 memcpy(&new_policy, policy, sizeof(*policy));
872 /* assure that the starting sequence is run in cpufreq_set_policy */
873 policy->governor = NULL;
875 /* set default policy */
876 ret = cpufreq_set_policy(policy, &new_policy);
877 policy->user_policy.policy = policy->policy;
878 policy->user_policy.governor = policy->governor;
881 pr_debug("setting policy failed\n");
882 if (cpufreq_driver->exit)
883 cpufreq_driver->exit(policy);
887 #ifdef CONFIG_HOTPLUG_CPU
888 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
889 unsigned int cpu, struct device *dev,
896 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
898 pr_err("%s: Failed to stop governor\n", __func__);
903 lock_policy_rwsem_write(policy->cpu);
905 write_lock_irqsave(&cpufreq_driver_lock, flags);
907 cpumask_set_cpu(cpu, policy->cpus);
908 per_cpu(cpufreq_cpu_data, cpu) = policy;
909 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
911 unlock_policy_rwsem_write(policy->cpu);
914 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
915 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
916 pr_err("%s: Failed to start governor\n", __func__);
921 /* Don't touch sysfs links during light-weight init */
923 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
929 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
931 struct cpufreq_policy *policy;
934 read_lock_irqsave(&cpufreq_driver_lock, flags);
936 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
938 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
943 static struct cpufreq_policy *cpufreq_policy_alloc(void)
945 struct cpufreq_policy *policy;
947 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
951 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
952 goto err_free_policy;
954 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
955 goto err_free_cpumask;
957 INIT_LIST_HEAD(&policy->policy_list);
961 free_cpumask_var(policy->cpus);
968 static void cpufreq_policy_free(struct cpufreq_policy *policy)
970 free_cpumask_var(policy->related_cpus);
971 free_cpumask_var(policy->cpus);
975 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
977 if (WARN_ON(cpu == policy->cpu))
981 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
982 * Also lock for last cpu is enough here as contention will happen only
983 * after policy->cpu is changed and after it is changed, other threads
984 * will try to acquire lock for new cpu. And policy is already updated by then.
987 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
989 policy->last_cpu = policy->cpu;
992 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
994 cpufreq_frequency_table_update_policy_cpu(policy);
995 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
996 CPUFREQ_UPDATE_POLICY_CPU, policy);
999 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1002 unsigned int j, cpu = dev->id;
1004 struct cpufreq_policy *policy;
1005 unsigned long flags;
1006 #ifdef CONFIG_HOTPLUG_CPU
1007 struct cpufreq_policy *tpolicy;
1008 struct cpufreq_governor *gov;
1011 if (cpu_is_offline(cpu))
1014 pr_debug("adding CPU %u\n", cpu);
1017 /* check whether a different CPU already registered this
1018 * CPU because it is in the same boat. */
1019 policy = cpufreq_cpu_get(cpu);
1020 if (unlikely(policy)) {
1021 cpufreq_cpu_put(policy);
1026 if (!down_read_trylock(&cpufreq_rwsem))
1029 #ifdef CONFIG_HOTPLUG_CPU
1030 /* Check if this cpu was hot-unplugged earlier and has siblings */
1031 read_lock_irqsave(&cpufreq_driver_lock, flags);
1032 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1033 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1034 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1035 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
1036 up_read(&cpufreq_rwsem);
1040 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1044 /* Restore the saved policy when doing light-weight init */
1045 policy = cpufreq_policy_restore(cpu);
1047 policy = cpufreq_policy_alloc();
1054 * In the resume path, since we restore a saved policy, the assignment
1055 * to policy->cpu is like an update of the existing policy, rather than
1056 * the creation of a brand new one. So we need to perform this update
1057 * by invoking update_policy_cpu().
1059 if (frozen && cpu != policy->cpu)
1060 update_policy_cpu(policy, cpu);
1064 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1065 cpumask_copy(policy->cpus, cpumask_of(cpu));
1067 init_completion(&policy->kobj_unregister);
1068 INIT_WORK(&policy->update, handle_update);
1070 /* call driver. From then on the cpufreq must be able
1071 * to accept all calls to ->verify and ->setpolicy for this CPU
1073 ret = cpufreq_driver->init(policy);
1075 pr_debug("initialization failed\n");
1076 goto err_set_policy_cpu;
1079 if (cpufreq_driver->get) {
1080 policy->cur = cpufreq_driver->get(policy->cpu);
1082 pr_err("%s: ->get() failed\n", __func__);
1087 /* related cpus should at least have policy->cpus */
1088 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1091 * affected cpus must always be the ones that are online. We aren't
1092 * managing offline cpus here.
1094 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1096 policy->user_policy.min = policy->min;
1097 policy->user_policy.max = policy->max;
1099 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1100 CPUFREQ_START, policy);
1102 #ifdef CONFIG_HOTPLUG_CPU
1103 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1105 policy->governor = gov;
1106 pr_debug("Restoring governor %s for cpu %d\n",
1107 policy->governor->name, cpu);
1111 write_lock_irqsave(&cpufreq_driver_lock, flags);
1112 for_each_cpu(j, policy->cpus)
1113 per_cpu(cpufreq_cpu_data, j) = policy;
1114 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1117 ret = cpufreq_add_dev_interface(policy, dev);
1119 goto err_out_unregister;
1122 write_lock_irqsave(&cpufreq_driver_lock, flags);
1123 list_add(&policy->policy_list, &cpufreq_policy_list);
1124 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1126 cpufreq_init_policy(policy);
1128 kobject_uevent(&policy->kobj, KOBJ_ADD);
1129 up_read(&cpufreq_rwsem);
1131 pr_debug("initialization complete\n");
1136 write_lock_irqsave(&cpufreq_driver_lock, flags);
1137 for_each_cpu(j, policy->cpus)
1138 per_cpu(cpufreq_cpu_data, j) = NULL;
1139 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1142 if (cpufreq_driver->exit)
1143 cpufreq_driver->exit(policy);
1145 cpufreq_policy_free(policy);
1147 up_read(&cpufreq_rwsem);
1153 * cpufreq_add_dev - add a CPU device
1155 * Adds the cpufreq interface for a CPU device.
1157 * The Oracle says: try running cpufreq registration/unregistration concurrently
1158 * with cpu hotplugging and all hell will break loose. Tried to clean this
1159 * mess up, but more thorough testing is needed. - Mathieu
1161 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1163 return __cpufreq_add_dev(dev, sif, false);
1166 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1167 unsigned int old_cpu, bool frozen)
1169 struct device *cpu_dev;
1172 /* first sibling now owns the new sysfs dir */
1173 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1175 /* Don't touch sysfs files during light-weight tear-down */
1179 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1180 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1182 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1184 lock_policy_rwsem_write(old_cpu);
1185 cpumask_set_cpu(old_cpu, policy->cpus);
1186 unlock_policy_rwsem_write(old_cpu);
1188 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1197 static int __cpufreq_remove_dev_prepare(struct device *dev,
1198 struct subsys_interface *sif,
1201 unsigned int cpu = dev->id, cpus;
1203 unsigned long flags;
1204 struct cpufreq_policy *policy;
1206 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1208 write_lock_irqsave(&cpufreq_driver_lock, flags);
1210 policy = per_cpu(cpufreq_cpu_data, cpu);
1212 /* Save the policy somewhere when doing a light-weight tear-down */
1214 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1216 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1219 pr_debug("%s: No cpu_data found\n", __func__);
1224 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1226 pr_err("%s: Failed to stop governor\n", __func__);
1231 #ifdef CONFIG_HOTPLUG_CPU
1232 if (!cpufreq_driver->setpolicy)
1233 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1234 policy->governor->name, CPUFREQ_NAME_LEN);
1237 lock_policy_rwsem_read(cpu);
1238 cpus = cpumask_weight(policy->cpus);
1239 unlock_policy_rwsem_read(cpu);
1241 if (cpu != policy->cpu) {
1243 sysfs_remove_link(&dev->kobj, "cpufreq");
1244 } else if (cpus > 1) {
1245 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1247 update_policy_cpu(policy, new_cpu);
1250 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1251 __func__, new_cpu, cpu);
1259 static int __cpufreq_remove_dev_finish(struct device *dev,
1260 struct subsys_interface *sif,
1263 unsigned int cpu = dev->id, cpus;
1265 unsigned long flags;
1266 struct cpufreq_policy *policy;
1267 struct kobject *kobj;
1268 struct completion *cmp;
1270 read_lock_irqsave(&cpufreq_driver_lock, flags);
1271 policy = per_cpu(cpufreq_cpu_data, cpu);
1272 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1275 pr_debug("%s: No cpu_data found\n", __func__);
1279 lock_policy_rwsem_write(cpu);
1280 cpus = cpumask_weight(policy->cpus);
1283 cpumask_clear_cpu(cpu, policy->cpus);
1284 unlock_policy_rwsem_write(cpu);
1286 /* If cpu is last user of policy, free policy */
1289 ret = __cpufreq_governor(policy,
1290 CPUFREQ_GOV_POLICY_EXIT);
1292 pr_err("%s: Failed to exit governor\n",
1299 lock_policy_rwsem_read(cpu);
1300 kobj = &policy->kobj;
1301 cmp = &policy->kobj_unregister;
1302 unlock_policy_rwsem_read(cpu);
1306 * We need to make sure that the underlying kobj is
1307 * actually not referenced anymore by anybody before we
1308 * proceed with unloading.
1310 pr_debug("waiting for dropping of refcount\n");
1311 wait_for_completion(cmp);
1312 pr_debug("wait complete\n");
1316 * Perform the ->exit() even during light-weight tear-down,
1317 * since this is a core component, and is essential for the
1318 * subsequent light-weight ->init() to succeed.
1320 if (cpufreq_driver->exit)
1321 cpufreq_driver->exit(policy);
1323 /* Remove policy from list of active policies */
1324 write_lock_irqsave(&cpufreq_driver_lock, flags);
1325 list_del(&policy->policy_list);
1326 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1329 cpufreq_policy_free(policy);
1332 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1333 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1334 pr_err("%s: Failed to start governor\n",
1341 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1346 * cpufreq_remove_dev - remove a CPU device
1348 * Removes the cpufreq interface for a CPU device.
1350 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1352 unsigned int cpu = dev->id;
1355 if (cpu_is_offline(cpu))
1358 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1361 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1366 static void handle_update(struct work_struct *work)
1368 struct cpufreq_policy *policy =
1369 container_of(work, struct cpufreq_policy, update);
1370 unsigned int cpu = policy->cpu;
1371 pr_debug("handle_update for cpu %u called\n", cpu);
1372 cpufreq_update_policy(cpu);
1376 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1379 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1380 * @new_freq: CPU frequency the CPU actually runs at
1382 * We adjust to current frequency first, and need to clean up later.
1383 * So either call cpufreq_update_policy() or schedule handle_update().
1385 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1386 unsigned int new_freq)
1388 struct cpufreq_policy *policy;
1389 struct cpufreq_freqs freqs;
1390 unsigned long flags;
1392 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1393 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1395 freqs.old = old_freq;
1396 freqs.new = new_freq;
1398 read_lock_irqsave(&cpufreq_driver_lock, flags);
1399 policy = per_cpu(cpufreq_cpu_data, cpu);
1400 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1402 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1403 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1407 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1410 * This is the last known freq, without actually getting it from the driver.
1411 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1413 unsigned int cpufreq_quick_get(unsigned int cpu)
1415 struct cpufreq_policy *policy;
1416 unsigned int ret_freq = 0;
1418 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1419 return cpufreq_driver->get(cpu);
1421 policy = cpufreq_cpu_get(cpu);
1423 ret_freq = policy->cur;
1424 cpufreq_cpu_put(policy);
1429 EXPORT_SYMBOL(cpufreq_quick_get);
1432 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1435 * Just return the max possible frequency for a given CPU.
1437 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1439 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1440 unsigned int ret_freq = 0;
1443 ret_freq = policy->max;
1444 cpufreq_cpu_put(policy);
1449 EXPORT_SYMBOL(cpufreq_quick_get_max);
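/*
 * Example (editor's sketch): cheap, non-blocking frequency queries as a
 * thermal or power client might use them.  example_report_headroom() is a
 * hypothetical helper.
 */
static void example_report_headroom(unsigned int cpu)
{
	unsigned int cur = cpufreq_quick_get(cpu);	/* kHz, 0 if unknown */
	unsigned int max = cpufreq_quick_get_max(cpu);	/* kHz, 0 if unknown */

	if (cur && max)
		pr_info("cpu%u: running at %u of %u kHz\n", cpu, cur, max);
}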
1451 static unsigned int __cpufreq_get(unsigned int cpu)
1453 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1454 unsigned int ret_freq = 0;
1456 if (!cpufreq_driver->get)
1459 ret_freq = cpufreq_driver->get(cpu);
1461 if (ret_freq && policy->cur &&
1462 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1463 /* verify no discrepancy between actual and
1464 saved value exists */
1465 if (unlikely(ret_freq != policy->cur)) {
1466 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1467 schedule_work(&policy->update);
1475 * cpufreq_get - get the current CPU frequency (in kHz)
1478 * Get the current CPU frequency
1480 unsigned int cpufreq_get(unsigned int cpu)
1482 unsigned int ret_freq = 0;
1484 if (cpufreq_disabled() || !cpufreq_driver)
1487 if (!down_read_trylock(&cpufreq_rwsem))
1490 lock_policy_rwsem_read(cpu);
1492 ret_freq = __cpufreq_get(cpu);
1494 unlock_policy_rwsem_read(cpu);
1495 up_read(&cpufreq_rwsem);
1499 EXPORT_SYMBOL(cpufreq_get);
1501 static struct subsys_interface cpufreq_interface = {
1503 .subsys = &cpu_subsys,
1504 .add_dev = cpufreq_add_dev,
1505 .remove_dev = cpufreq_remove_dev,
1509 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1511 * This function is only executed for the boot processor. The other CPUs
1512 * have been put offline by means of CPU hotplug.
1514 static int cpufreq_bp_suspend(void)
1518 int cpu = smp_processor_id();
1519 struct cpufreq_policy *policy;
1521 pr_debug("suspending cpu %u\n", cpu);
1523 /* If there's no policy for the boot CPU, we have nothing to do. */
1524 policy = cpufreq_cpu_get(cpu);
1528 if (cpufreq_driver->suspend) {
1529 ret = cpufreq_driver->suspend(policy);
1531 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1532 "step on CPU %u\n", policy->cpu);
1535 cpufreq_cpu_put(policy);
1540 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1542 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1543 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1544 * restored. It will verify that the current freq is in sync with
1545 * what we believe it to be. This is a bit later than when it
1546 * should be, but nonetheless it's better than calling
1547 * cpufreq_driver->get() here which might re-enable interrupts...
1549 * This function is only executed for the boot CPU. The other CPUs have not
1550 * been turned on yet.
1552 static void cpufreq_bp_resume(void)
1556 int cpu = smp_processor_id();
1557 struct cpufreq_policy *policy;
1559 pr_debug("resuming cpu %u\n", cpu);
1561 /* If there's no policy for the boot CPU, we have nothing to do. */
1562 policy = cpufreq_cpu_get(cpu);
1566 if (cpufreq_driver->resume) {
1567 ret = cpufreq_driver->resume(policy);
1569 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1570 "step on CPU %u\n", policy->cpu);
1575 schedule_work(&policy->update);
1578 cpufreq_cpu_put(policy);
1581 static struct syscore_ops cpufreq_syscore_ops = {
1582 .suspend = cpufreq_bp_suspend,
1583 .resume = cpufreq_bp_resume,
1587 * cpufreq_get_current_driver - return current driver's name
1589 * Return the name string of the currently loaded cpufreq driver
1592 const char *cpufreq_get_current_driver(void)
1595 return cpufreq_driver->name;
1599 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1601 /*********************************************************************
1602 * NOTIFIER LISTS INTERFACE *
1603 *********************************************************************/
1606 * cpufreq_register_notifier - register a driver with cpufreq
1607 * @nb: notifier function to register
1608 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1610 * Add a driver to one of two lists: either a list of drivers that
1611 * are notified about clock rate changes (once before and once after
1612 * the transition), or a list of drivers that are notified about
1613 * changes in cpufreq policy.
1615 * This function may sleep, and has the same return conditions as
1616 * blocking_notifier_chain_register.
1618 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1622 if (cpufreq_disabled())
1625 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1628 case CPUFREQ_TRANSITION_NOTIFIER:
1629 ret = srcu_notifier_chain_register(
1630 &cpufreq_transition_notifier_list, nb);
1632 case CPUFREQ_POLICY_NOTIFIER:
1633 ret = blocking_notifier_chain_register(
1634 &cpufreq_policy_notifier_list, nb);
1642 EXPORT_SYMBOL(cpufreq_register_notifier);
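/*
 * Example (editor's sketch): a client interested in every frequency change
 * registers on the transition list; @data is a struct cpufreq_freqs.  The
 * example_* names are made up.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u switched from %u to %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/*
 * The client's init path would then call:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */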
1645 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1646 * @nb: notifier block to be unregistered
1647 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1649 * Remove a driver from the CPU frequency notifier list.
1651 * This function may sleep, and has the same return conditions as
1652 * blocking_notifier_chain_unregister.
1654 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1658 if (cpufreq_disabled())
1662 case CPUFREQ_TRANSITION_NOTIFIER:
1663 ret = srcu_notifier_chain_unregister(
1664 &cpufreq_transition_notifier_list, nb);
1666 case CPUFREQ_POLICY_NOTIFIER:
1667 ret = blocking_notifier_chain_unregister(
1668 &cpufreq_policy_notifier_list, nb);
1676 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1679 /*********************************************************************
1681 *********************************************************************/
1683 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1684 unsigned int target_freq,
1685 unsigned int relation)
1687 int retval = -EINVAL;
1688 unsigned int old_target_freq = target_freq;
1690 if (cpufreq_disabled())
1693 /* Make sure that target_freq is within supported range */
1694 if (target_freq > policy->max)
1695 target_freq = policy->max;
1696 if (target_freq < policy->min)
1697 target_freq = policy->min;
1699 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1700 policy->cpu, target_freq, relation, old_target_freq);
1703 * This might look like a redundant call as we are checking it again
1704 * after finding index. But it is left intentionally for cases where
1705 * exactly same freq is called again and so we can save on a few function calls.
1708 if (target_freq == policy->cur)
1711 if (cpufreq_driver->target)
1712 retval = cpufreq_driver->target(policy, target_freq, relation);
1713 else if (cpufreq_driver->target_index) {
1714 struct cpufreq_frequency_table *freq_table;
1717 freq_table = cpufreq_frequency_get_table(policy->cpu);
1718 if (unlikely(!freq_table)) {
1719 pr_err("%s: Unable to find freq_table\n", __func__);
1723 retval = cpufreq_frequency_table_target(policy, freq_table,
1724 target_freq, relation, &index);
1725 if (unlikely(retval)) {
1726 pr_err("%s: Unable to find matching freq\n", __func__);
1730 if (freq_table[index].frequency == policy->cur)
1733 retval = cpufreq_driver->target_index(policy, index);
1739 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1741 int cpufreq_driver_target(struct cpufreq_policy *policy,
1742 unsigned int target_freq,
1743 unsigned int relation)
1747 lock_policy_rwsem_write(policy->cpu);
1749 ret = __cpufreq_driver_target(policy, target_freq, relation);
1751 unlock_policy_rwsem_write(policy->cpu);
1755 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
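/*
 * Example (editor's sketch): how an in-kernel user (outside the governor
 * callbacks, which may already hold the policy rwsem) might request a
 * frequency derived from a load figure.  CPUFREQ_RELATION_L asks for the
 * lowest driver frequency at or above the requested value.
 */
static int example_scale_for_load(struct cpufreq_policy *policy,
				  unsigned int load_pct)
{
	unsigned int target = policy->min +
		(policy->max - policy->min) * load_pct / 100;

	return cpufreq_driver_target(policy, target, CPUFREQ_RELATION_L);
}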
1758 * when "event" is CPUFREQ_GOV_LIMITS
1761 static int __cpufreq_governor(struct cpufreq_policy *policy,
1766 /* Must only be defined when the default governor is known to have latency
1767 restrictions, e.g. conservative or ondemand.
1768 That this is the case is already ensured in Kconfig
1770 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1771 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1773 struct cpufreq_governor *gov = NULL;
1776 if (policy->governor->max_transition_latency &&
1777 policy->cpuinfo.transition_latency >
1778 policy->governor->max_transition_latency) {
1782 printk(KERN_WARNING "%s governor failed, too long"
1783 " transition latency of HW, fallback"
1784 " to %s governor\n",
1785 policy->governor->name,
1787 policy->governor = gov;
1791 if (event == CPUFREQ_GOV_POLICY_INIT)
1792 if (!try_module_get(policy->governor->owner))
1795 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1796 policy->cpu, event);
1798 mutex_lock(&cpufreq_governor_lock);
1799 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1800 || (!policy->governor_enabled
1801 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1802 mutex_unlock(&cpufreq_governor_lock);
1806 if (event == CPUFREQ_GOV_STOP)
1807 policy->governor_enabled = false;
1808 else if (event == CPUFREQ_GOV_START)
1809 policy->governor_enabled = true;
1811 mutex_unlock(&cpufreq_governor_lock);
1813 ret = policy->governor->governor(policy, event);
1816 if (event == CPUFREQ_GOV_POLICY_INIT)
1817 policy->governor->initialized++;
1818 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1819 policy->governor->initialized--;
1821 /* Restore original values */
1822 mutex_lock(&cpufreq_governor_lock);
1823 if (event == CPUFREQ_GOV_STOP)
1824 policy->governor_enabled = true;
1825 else if (event == CPUFREQ_GOV_START)
1826 policy->governor_enabled = false;
1827 mutex_unlock(&cpufreq_governor_lock);
1830 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1831 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1832 module_put(policy->governor->owner);
1837 int cpufreq_register_governor(struct cpufreq_governor *governor)
1844 if (cpufreq_disabled())
1847 mutex_lock(&cpufreq_governor_mutex);
1849 governor->initialized = 0;
1851 if (__find_governor(governor->name) == NULL) {
1853 list_add(&governor->governor_list, &cpufreq_governor_list);
1856 mutex_unlock(&cpufreq_governor_mutex);
1859 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1861 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1863 #ifdef CONFIG_HOTPLUG_CPU
1870 if (cpufreq_disabled())
1873 #ifdef CONFIG_HOTPLUG_CPU
1874 for_each_present_cpu(cpu) {
1875 if (cpu_online(cpu))
1877 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1878 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1882 mutex_lock(&cpufreq_governor_mutex);
1883 list_del(&governor->governor_list);
1884 mutex_unlock(&cpufreq_governor_mutex);
1887 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
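/*
 * Example (editor's sketch): the skeleton of a trivial governor as it plugs
 * into the event interface driven by __cpufreq_governor().  "example" is a
 * made-up governor name.
 */
static int cpufreq_governor_example(struct cpufreq_policy *policy,
				    unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* always run at the lower policy limit */
		return __cpufreq_driver_target(policy, policy->min,
					       CPUFREQ_RELATION_L);
	default:
		return 0;
	}
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	.governor	= cpufreq_governor_example,
	.owner		= THIS_MODULE,
};

/*
 * A governor module registers and unregisters itself with:
 *	cpufreq_register_governor(&cpufreq_gov_example);
 *	cpufreq_unregister_governor(&cpufreq_gov_example);
 */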
1890 /*********************************************************************
1891 * POLICY INTERFACE *
1892 *********************************************************************/
1895 * cpufreq_get_policy - get the current cpufreq_policy
1896 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
1899 * Reads the current cpufreq policy.
1901 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1903 struct cpufreq_policy *cpu_policy;
1907 cpu_policy = cpufreq_cpu_get(cpu);
1911 memcpy(policy, cpu_policy, sizeof(*policy));
1913 cpufreq_cpu_put(cpu_policy);
1916 EXPORT_SYMBOL(cpufreq_get_policy);
1919 * policy : current policy.
1920 * new_policy: policy to be set.
1922 static int cpufreq_set_policy(struct cpufreq_policy *policy,
1923 struct cpufreq_policy *new_policy)
1925 int ret = 0, failed = 1;
1927 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1928 new_policy->min, new_policy->max);
1930 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1932 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1937 /* verify the cpu speed can be set within this limit */
1938 ret = cpufreq_driver->verify(new_policy);
1942 /* adjust if necessary - all reasons */
1943 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1944 CPUFREQ_ADJUST, new_policy);
1946 /* adjust if necessary - hardware incompatibility*/
1947 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1948 CPUFREQ_INCOMPATIBLE, new_policy);
1951 * verify the cpu speed can be set within this limit, which might be
1952 * different to the first one
1954 ret = cpufreq_driver->verify(new_policy);
1958 /* notification of the new policy */
1959 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1960 CPUFREQ_NOTIFY, new_policy);
1962 policy->min = new_policy->min;
1963 policy->max = new_policy->max;
1965 pr_debug("new min and max freqs are %u - %u kHz\n",
1966 policy->min, policy->max);
1968 if (cpufreq_driver->setpolicy) {
1969 policy->policy = new_policy->policy;
1970 pr_debug("setting range\n");
1971 ret = cpufreq_driver->setpolicy(new_policy);
1973 if (new_policy->governor != policy->governor) {
1974 /* save old, working values */
1975 struct cpufreq_governor *old_gov = policy->governor;
1977 pr_debug("governor switch\n");
1979 /* end old governor */
1980 if (policy->governor) {
1981 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1982 unlock_policy_rwsem_write(new_policy->cpu);
1983 __cpufreq_governor(policy,
1984 CPUFREQ_GOV_POLICY_EXIT);
1985 lock_policy_rwsem_write(new_policy->cpu);
1988 /* start new governor */
1989 policy->governor = new_policy->governor;
1990 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1991 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1994 unlock_policy_rwsem_write(new_policy->cpu);
1995 __cpufreq_governor(policy,
1996 CPUFREQ_GOV_POLICY_EXIT);
1997 lock_policy_rwsem_write(new_policy->cpu);
2002 /* new governor failed, so re-start old one */
2003 pr_debug("starting governor %s failed\n",
2004 policy->governor->name);
2006 policy->governor = old_gov;
2007 __cpufreq_governor(policy,
2008 CPUFREQ_GOV_POLICY_INIT);
2009 __cpufreq_governor(policy,
2015 /* might be a policy change, too, so fall through */
2017 pr_debug("governor: change or update limits\n");
2018 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2026 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2027 * @cpu: CPU which shall be re-evaluated
2029 * Useful for policy notifiers which have different necessities
2030 * at different times.
2032 int cpufreq_update_policy(unsigned int cpu)
2034 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2035 struct cpufreq_policy new_policy;
2043 lock_policy_rwsem_write(cpu);
2045 pr_debug("updating policy for CPU %u\n", cpu);
2046 memcpy(&new_policy, policy, sizeof(*policy));
2047 new_policy.min = policy->user_policy.min;
2048 new_policy.max = policy->user_policy.max;
2049 new_policy.policy = policy->user_policy.policy;
2050 new_policy.governor = policy->user_policy.governor;
2053 * BIOS might change freq behind our back
2054 * -> ask driver for current freq and notify governors about a change
2056 if (cpufreq_driver->get) {
2057 new_policy.cur = cpufreq_driver->get(cpu);
2059 pr_debug("Driver did not initialize current freq");
2060 policy->cur = new_policy.cur;
2062 if (policy->cur != new_policy.cur && has_target())
2063 cpufreq_out_of_sync(cpu, policy->cur,
2068 ret = cpufreq_set_policy(policy, &new_policy);
2070 unlock_policy_rwsem_write(cpu);
2072 cpufreq_cpu_put(policy);
2076 EXPORT_SYMBOL(cpufreq_update_policy);
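/*
 * Example (editor's sketch): a policy notifier (e.g. a thermal limiter) can
 * clamp the allowed range at CPUFREQ_ADJUST time and call
 * cpufreq_update_policy() whenever its own limit changes, so the chain above
 * is re-run.  example_max_khz and example_policy_cb() are made-up names.
 */
static unsigned int example_max_khz = UINT_MAX;

static int example_policy_cb(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, example_max_khz);

	return NOTIFY_OK;
}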
2078 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2079 unsigned long action, void *hcpu)
2081 unsigned int cpu = (unsigned long)hcpu;
2083 bool frozen = false;
2085 dev = get_cpu_device(cpu);
2088 if (action & CPU_TASKS_FROZEN)
2091 switch (action & ~CPU_TASKS_FROZEN) {
2093 __cpufreq_add_dev(dev, NULL, frozen);
2094 cpufreq_update_policy(cpu);
2097 case CPU_DOWN_PREPARE:
2098 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2102 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2105 case CPU_DOWN_FAILED:
2106 __cpufreq_add_dev(dev, NULL, frozen);
2113 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2114 .notifier_call = cpufreq_cpu_callback,
2117 /*********************************************************************
2118 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2119 *********************************************************************/
2122 * cpufreq_register_driver - register a CPU Frequency driver
2123 * @driver_data: A struct cpufreq_driver containing the values
2124 * submitted by the CPU Frequency driver.
2126 * Registers a CPU Frequency driver to this core code. This code
2127 * returns zero on success, -EBUSY when another driver got here first
2128 * (and isn't unregistered in the meantime).
2131 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2133 unsigned long flags;
2136 if (cpufreq_disabled())
2139 if (!driver_data || !driver_data->verify || !driver_data->init ||
2140 !(driver_data->setpolicy || driver_data->target_index ||
2141 driver_data->target))
2144 pr_debug("trying to register driver %s\n", driver_data->name);
2146 if (driver_data->setpolicy)
2147 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2149 write_lock_irqsave(&cpufreq_driver_lock, flags);
2150 if (cpufreq_driver) {
2151 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2154 cpufreq_driver = driver_data;
2155 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2157 ret = subsys_interface_register(&cpufreq_interface);
2159 goto err_null_driver;
2161 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2165 /* check for at least one working CPU */
2166 for (i = 0; i < nr_cpu_ids; i++)
2167 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2172 /* if all ->init() calls failed, unregister */
2174 pr_debug("no CPU initialized for driver %s\n",
2180 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2181 pr_debug("driver %s up and running\n", driver_data->name);
2185 subsys_interface_unregister(&cpufreq_interface);
2187 write_lock_irqsave(&cpufreq_driver_lock, flags);
2188 cpufreq_driver = NULL;
2189 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2192 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
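/*
 * Example (editor's sketch): the shape of a minimal ->target_index() driver
 * and its registration.  example_hw_set_index() is hypothetical, and the
 * generic table helpers are assumed to be available in this tree; a real
 * driver would also issue the PRECHANGE/POSTCHANGE notifications shown
 * earlier.
 */
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the hardware to example_freq_table[index].frequency */
	return example_hw_set_index(index);	/* hypothetical */
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpufreq_init,	/* from the sketch above */
	.attr		= cpufreq_generic_attr,
};

/*
 * The driver's module_init() would then simply do:
 *	return cpufreq_register_driver(&example_cpufreq_driver);
 */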
2195 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2197 * Unregister the current CPUFreq driver. Only call this if you have
2198 * the right to do so, i.e. if you have succeeded in initialising before!
2199 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2200 * currently not initialised.
2202 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2204 unsigned long flags;
2206 if (!cpufreq_driver || (driver != cpufreq_driver))
2209 pr_debug("unregistering driver %s\n", driver->name);
2211 subsys_interface_unregister(&cpufreq_interface);
2212 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2214 down_write(&cpufreq_rwsem);
2215 write_lock_irqsave(&cpufreq_driver_lock, flags);
2217 cpufreq_driver = NULL;
2219 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2220 up_write(&cpufreq_rwsem);
2224 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2226 static int __init cpufreq_core_init(void)
2230 if (cpufreq_disabled())
2233 for_each_possible_cpu(cpu)
2234 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2236 cpufreq_global_kobject = kobject_create();
2237 BUG_ON(!cpufreq_global_kobject);
2238 register_syscore_ops(&cpufreq_syscore_ops);
2242 core_initcall(cpufreq_core_init);