/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/cpufreq.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/rculist.h>
22 #include <linux/rcupdate.h>
23 #include <linux/pm_opp.h>
25 #include <linux/export.h>
/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */
44 * struct dev_pm_opp - Generic OPP description structure
45 * @node: opp list node. The nodes are maintained throughout the lifetime
46 * of boot. It is expected only an optimal set of OPPs are
47 * added to the library by the SoC framework.
48 * RCU usage: opp list is traversed with RCU locks. node
49 * modification is possible realtime, hence the modifications
50 * are protected by the dev_opp_list_lock for integrity.
51 * IMPORTANT: the opp nodes should be maintained in increasing
53 * @available: true/false - marks if this OPP as available or not
54 * @rate: Frequency in hertz
55 * @u_volt: Nominal voltage in microvolts corresponding to this OPP
56 * @dev_opp: points back to the device_opp struct this opp belongs to
57 * @head: RCU callback head used for deferred freeing
59 * This structure stores the OPP information for a given device.
62 struct list_head node;
68 struct device_opp *dev_opp;
73 * struct device_opp - Device opp structure
74 * @node: list node - contains the devices with OPPs that
75 * have been registered. Nodes once added are not modified in this
77 * RCU usage: nodes are not modified in the list of device_opp,
78 * however addition is possible and is secured by dev_opp_list_lock
79 * @dev: device pointer
80 * @head: notifier head to notify the OPP availability changes.
81 * @opp_list: list of opps
83 * This is an internal data structure maintaining the link to opps attached to
84 * a device. This structure is not meant to be shared to users as it is
85 * meant for book keeping and private to OPP library
88 struct list_head node;
91 struct srcu_notifier_head head;
92 struct list_head opp_list;
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
105 * find_device_opp() - find device_opp struct using device pointer
106 * @dev: device pointer used to lookup device OPPs
108 * Search list of device OPPs for one containing matching device. Does a RCU
109 * reader operation to grab the pointer needed.
111 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
112 * -EINVAL based on type of error.
114 * Locking: This function must be called under rcu_read_lock(). device_opp
115 * is a RCU protected pointer. This means that device_opp is valid as long
116 * as we are under RCU lock.
118 static struct device_opp *find_device_opp(struct device *dev)
120 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
122 if (unlikely(IS_ERR_OR_NULL(dev))) {
123 pr_err("%s: Invalid parameters\n", __func__);
124 return ERR_PTR(-EINVAL);
127 list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
128 if (tmp_dev_opp->dev == dev) {
129 dev_opp = tmp_dev_opp;
138 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
139 * @opp: opp for which voltage has to be returned for
141 * Return voltage in micro volt corresponding to the opp, else
144 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
145 * protected pointer. This means that opp which could have been fetched by
146 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
147 * under RCU lock. The pointer returned by the opp_find_freq family must be
148 * used in the same section as the usage of this function with the pointer
149 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
152 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
154 struct dev_pm_opp *tmp_opp;
157 tmp_opp = rcu_dereference(opp);
158 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
159 pr_err("%s: Invalid parameters\n", __func__);
165 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
168 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
169 * @opp: opp for which frequency has to be returned for
171 * Return frequency in hertz corresponding to the opp, else
174 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
175 * protected pointer. This means that opp which could have been fetched by
176 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
177 * under RCU lock. The pointer returned by the opp_find_freq family must be
178 * used in the same section as the usage of this function with the pointer
179 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
182 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
184 struct dev_pm_opp *tmp_opp;
187 tmp_opp = rcu_dereference(opp);
188 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
189 pr_err("%s: Invalid parameters\n", __func__);
195 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
198 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
199 * @dev: device for which we do this operation
201 * This function returns the number of available opps if there are any,
202 * else returns 0 if none or the corresponding error value.
204 * Locking: This function must be called under rcu_read_lock(). This function
205 * internally references two RCU protected structures: device_opp and opp which
206 * are safe as long as we are under a common RCU locked section.
208 int dev_pm_opp_get_opp_count(struct device *dev)
210 struct device_opp *dev_opp;
211 struct dev_pm_opp *temp_opp;
214 dev_opp = find_device_opp(dev);
215 if (IS_ERR(dev_opp)) {
216 int r = PTR_ERR(dev_opp);
217 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
221 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
222 if (temp_opp->available)
228 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
231 * dev_pm_opp_find_freq_exact() - search for an exact frequency
232 * @dev: device for which we do this operation
233 * @freq: frequency to search for
234 * @available: true/false - match for available opp
236 * Searches for exact match in the opp list and returns pointer to the matching
237 * opp if found, else returns ERR_PTR in case of error and should be handled
238 * using IS_ERR. Error return values can be:
239 * EINVAL: for bad pointer
240 * ERANGE: no match found for search
241 * ENODEV: if device not found in list of registered devices
243 * Note: available is a modifier for the search. if available=true, then the
244 * match is for exact matching frequency and is available in the stored OPP
245 * table. if false, the match is for exact frequency which is not available.
247 * This provides a mechanism to enable an opp which is not available currently
248 * or the opposite as well.
250 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
251 * protected pointer. The reason for the same is that the opp pointer which is
252 * returned will remain valid for use with opp_get_{voltage, freq} only while
253 * under the locked area. The pointer returned must be used prior to unlocking
254 * with rcu_read_unlock() to maintain the integrity of the pointer.
256 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
260 struct device_opp *dev_opp;
261 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
263 dev_opp = find_device_opp(dev);
264 if (IS_ERR(dev_opp)) {
265 int r = PTR_ERR(dev_opp);
266 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
270 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
271 if (temp_opp->available == available &&
272 temp_opp->rate == freq) {
280 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
283 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
284 * @dev: device for which we do this operation
285 * @freq: Start frequency
287 * Search for the matching ceil *available* OPP from a starting freq
290 * Returns matching *opp and refreshes *freq accordingly, else returns
291 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
293 * EINVAL: for bad pointer
294 * ERANGE: no match found for search
295 * ENODEV: if device not found in list of registered devices
297 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
298 * protected pointer. The reason for the same is that the opp pointer which is
299 * returned will remain valid for use with opp_get_{voltage, freq} only while
300 * under the locked area. The pointer returned must be used prior to unlocking
301 * with rcu_read_unlock() to maintain the integrity of the pointer.
303 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
306 struct device_opp *dev_opp;
307 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
310 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
311 return ERR_PTR(-EINVAL);
314 dev_opp = find_device_opp(dev);
316 return ERR_CAST(dev_opp);
318 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
319 if (temp_opp->available && temp_opp->rate >= *freq) {
328 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
331 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
332 * @dev: device for which we do this operation
333 * @freq: Start frequency
335 * Search for the matching floor *available* OPP from a starting freq
338 * Returns matching *opp and refreshes *freq accordingly, else returns
339 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
341 * EINVAL: for bad pointer
342 * ERANGE: no match found for search
343 * ENODEV: if device not found in list of registered devices
345 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
346 * protected pointer. The reason for the same is that the opp pointer which is
347 * returned will remain valid for use with opp_get_{voltage, freq} only while
348 * under the locked area. The pointer returned must be used prior to unlocking
349 * with rcu_read_unlock() to maintain the integrity of the pointer.
351 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
354 struct device_opp *dev_opp;
355 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
358 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
359 return ERR_PTR(-EINVAL);
362 dev_opp = find_device_opp(dev);
364 return ERR_CAST(dev_opp);
366 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
367 if (temp_opp->available) {
368 /* go to the next node, before choosing prev */
369 if (temp_opp->rate > *freq)
380 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
383 * dev_pm_opp_add() - Add an OPP table from a table definitions
384 * @dev: device for which we do this operation
385 * @freq: Frequency in Hz for this OPP
386 * @u_volt: Voltage in uVolts for this OPP
388 * This function adds an opp definition to the opp list and returns status.
389 * The opp is made available by default and it can be controlled using
390 * dev_pm_opp_enable/disable functions.
392 * Locking: The internal device_opp and opp structures are RCU protected.
393 * Hence this function internally uses RCU updater strategy with mutex locks
394 * to keep the integrity of the internal data structures. Callers should ensure
395 * that this function is *NOT* called under RCU protection or in contexts where
396 * mutex cannot be locked.
398 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
400 struct device_opp *dev_opp = NULL;
401 struct dev_pm_opp *opp, *new_opp;
402 struct list_head *head;
404 /* allocate new OPP node */
405 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
407 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
411 /* Hold our list modification lock here */
412 mutex_lock(&dev_opp_list_lock);
414 /* Check for existing list for 'dev' */
415 dev_opp = find_device_opp(dev);
416 if (IS_ERR(dev_opp)) {
418 * Allocate a new device OPP table. In the infrequent case
419 * where a new device is needed to be added, we pay this
422 dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
424 mutex_unlock(&dev_opp_list_lock);
427 "%s: Unable to create device OPP structure\n",
433 srcu_init_notifier_head(&dev_opp->head);
434 INIT_LIST_HEAD(&dev_opp->opp_list);
436 /* Secure the device list modification */
437 list_add_rcu(&dev_opp->node, &dev_opp_list);
440 /* populate the opp table */
441 new_opp->dev_opp = dev_opp;
442 new_opp->rate = freq;
443 new_opp->u_volt = u_volt;
444 new_opp->available = true;
446 /* Insert new OPP in order of increasing frequency */
447 head = &dev_opp->opp_list;
448 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
449 if (new_opp->rate < opp->rate)
455 list_add_rcu(&new_opp->node, head);
456 mutex_unlock(&dev_opp_list_lock);
459 * Notify the changes in the availability of the operable
460 * frequency/voltage list.
462 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
465 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
468 * opp_set_availability() - helper to set the availability of an opp
469 * @dev: device for which we do this operation
470 * @freq: OPP frequency to modify availability
471 * @availability_req: availability status requested for this opp
473 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
474 * share a common logic which is isolated here.
476 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
477 * copy operation, returns 0 if no modifcation was done OR modification was
480 * Locking: The internal device_opp and opp structures are RCU protected.
481 * Hence this function internally uses RCU updater strategy with mutex locks to
482 * keep the integrity of the internal data structures. Callers should ensure
483 * that this function is *NOT* called under RCU protection or in contexts where
484 * mutex locking or synchronize_rcu() blocking calls cannot be used.
486 static int opp_set_availability(struct device *dev, unsigned long freq,
487 bool availability_req)
489 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
490 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
493 /* keep the node allocated */
494 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
496 dev_warn(dev, "%s: Unable to create OPP\n", __func__);
500 mutex_lock(&dev_opp_list_lock);
502 /* Find the device_opp */
503 list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
504 if (dev == tmp_dev_opp->dev) {
505 dev_opp = tmp_dev_opp;
509 if (IS_ERR(dev_opp)) {
510 r = PTR_ERR(dev_opp);
511 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
515 /* Do we have the frequency? */
516 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
517 if (tmp_opp->rate == freq) {
527 /* Is update really needed? */
528 if (opp->available == availability_req)
530 /* copy the old data over */
533 /* plug in new node */
534 new_opp->available = availability_req;
536 list_replace_rcu(&opp->node, &new_opp->node);
537 mutex_unlock(&dev_opp_list_lock);
538 kfree_rcu(opp, head);
540 /* Notify the change of the OPP availability */
541 if (availability_req)
542 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
545 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
551 mutex_unlock(&dev_opp_list_lock);
557 * dev_pm_opp_enable() - Enable a specific OPP
558 * @dev: device for which we do this operation
559 * @freq: OPP frequency to enable
561 * Enables a provided opp. If the operation is valid, this returns 0, else the
562 * corresponding error value. It is meant to be used for users an OPP available
563 * after being temporarily made unavailable with dev_pm_opp_disable.
565 * Locking: The internal device_opp and opp structures are RCU protected.
566 * Hence this function indirectly uses RCU and mutex locks to keep the
567 * integrity of the internal data structures. Callers should ensure that
568 * this function is *NOT* called under RCU protection or in contexts where
569 * mutex locking or synchronize_rcu() blocking calls cannot be used.
571 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
573 return opp_set_availability(dev, freq, true);
575 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
578 * dev_pm_opp_disable() - Disable a specific OPP
579 * @dev: device for which we do this operation
580 * @freq: OPP frequency to disable
582 * Disables a provided opp. If the operation is valid, this returns
583 * 0, else the corresponding error value. It is meant to be a temporary
584 * control by users to make this OPP not available until the circumstances are
585 * right to make it available again (with a call to dev_pm_opp_enable).
587 * Locking: The internal device_opp and opp structures are RCU protected.
588 * Hence this function indirectly uses RCU and mutex locks to keep the
589 * integrity of the internal data structures. Callers should ensure that
590 * this function is *NOT* called under RCU protection or in contexts where
591 * mutex locking or synchronize_rcu() blocking calls cannot be used.
593 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
595 return opp_set_availability(dev, freq, false);
597 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
599 #ifdef CONFIG_CPU_FREQ
601 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
602 * @dev: device for which we do this operation
603 * @table: Cpufreq table returned back to caller
605 * Generate a cpufreq table for a provided device- this assumes that the
606 * opp list is already initialized and ready for usage.
608 * This function allocates required memory for the cpufreq table. It is
609 * expected that the caller does the required maintenance such as freeing
610 * the table as required.
612 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
613 * if no memory available for the operation (table is not populated), returns 0
614 * if successful and table is populated.
616 * WARNING: It is important for the callers to ensure refreshing their copy of
617 * the table if any of the mentioned functions have been invoked in the interim.
619 * Locking: The internal device_opp and opp structures are RCU protected.
620 * To simplify the logic, we pretend we are updater and hold relevant mutex here
621 * Callers should ensure that this function is *NOT* called under RCU protection
622 * or in contexts where mutex locking cannot be used.
624 int dev_pm_opp_init_cpufreq_table(struct device *dev,
625 struct cpufreq_frequency_table **table)
627 struct device_opp *dev_opp;
628 struct dev_pm_opp *opp;
629 struct cpufreq_frequency_table *freq_table;
632 /* Pretend as if I am an updater */
633 mutex_lock(&dev_opp_list_lock);
635 dev_opp = find_device_opp(dev);
636 if (IS_ERR(dev_opp)) {
637 int r = PTR_ERR(dev_opp);
638 mutex_unlock(&dev_opp_list_lock);
639 dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
643 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
644 (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
646 mutex_unlock(&dev_opp_list_lock);
647 dev_warn(dev, "%s: Unable to allocate frequency table\n",
652 list_for_each_entry(opp, &dev_opp->opp_list, node) {
653 if (opp->available) {
654 freq_table[i].driver_data = i;
655 freq_table[i].frequency = opp->rate / 1000;
659 mutex_unlock(&dev_opp_list_lock);
661 freq_table[i].driver_data = i;
662 freq_table[i].frequency = CPUFREQ_TABLE_END;
664 *table = &freq_table[0];
668 EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
671 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
672 * @dev: device for which we do this operation
673 * @table: table to free
675 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
677 void dev_pm_opp_free_cpufreq_table(struct device *dev,
678 struct cpufreq_frequency_table **table)
686 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
687 #endif /* CONFIG_CPU_FREQ */
690 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
691 * @dev: device pointer used to lookup device OPPs.
693 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
695 struct device_opp *dev_opp = find_device_opp(dev);
698 return ERR_CAST(dev_opp); /* matching type */
700 return &dev_opp->head;
705 * of_init_opp_table() - Initialize opp table from device tree
706 * @dev: device pointer used to lookup device OPPs.
708 * Register the initial OPP table with the OPP library for given device.
710 int of_init_opp_table(struct device *dev)
712 const struct property *prop;
716 prop = of_find_property(dev->of_node, "operating-points", NULL);
723 * Each OPP is a set of tuples consisting of frequency and
724 * voltage like <freq-kHz vol-uV>.
726 nr = prop->length / sizeof(u32);
728 dev_err(dev, "%s: Invalid OPP list\n", __func__);
734 unsigned long freq = be32_to_cpup(val++) * 1000;
735 unsigned long volt = be32_to_cpup(val++);
737 if (dev_pm_opp_add(dev, freq, volt)) {
738 dev_warn(dev, "%s: Failed to add OPP %ld\n",
747 EXPORT_SYMBOL_GPL(of_init_opp_table);