struct kref ref;
};
+#define CREATE_TRACE_POINTS
+#include <trace/events/clk.h>
+
struct clk {
struct clk_core *core;
const char *dev_id;
const char *con_id;
unsigned long min_rate;
unsigned long max_rate;
- struct hlist_node child_node;
+ struct hlist_node clks_node;
};
/*** locking ***/
{
struct clk_core *child;
+ lockdep_assert_held(&prepare_lock);
+
hlist_for_each_entry(child, &clk->children, child_node)
clk_unprepare_unused_subtree(child);
return;
if (clk_core_is_prepared(clk)) {
+ trace_clk_unprepare(clk);
if (clk->ops->unprepare_unused)
clk->ops->unprepare_unused(clk->hw);
else if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
+ trace_clk_unprepare_complete(clk);
}
}
struct clk_core *child;
unsigned long flags;
+ lockdep_assert_held(&prepare_lock);
+
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
* back to .disable
*/
if (clk_core_is_enabled(clk)) {
+ trace_clk_disable(clk);
if (clk->ops->disable_unused)
clk->ops->disable_unused(clk->hw);
else if (clk->ops->disable)
clk->ops->disable(clk->hw);
+ trace_clk_disable_complete(clk);
}
unlock_out:
*min_rate = 0;
*max_rate = ULONG_MAX;
- hlist_for_each_entry(clk_user, &clk->clks, child_node)
+ hlist_for_each_entry(clk_user, &clk->clks, clks_node)
*min_rate = max(*min_rate, clk_user->min_rate);
- hlist_for_each_entry(clk_user, &clk->clks, child_node)
+ hlist_for_each_entry(clk_user, &clk->clks, clks_node)
*max_rate = min(*max_rate, clk_user->max_rate);
}
WARN_ON(clk->enable_count > 0);
+ trace_clk_unprepare(clk);
+
if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
+ trace_clk_unprepare_complete(clk);
clk_core_unprepare(clk->parent);
}
if (ret)
return ret;
- if (clk->ops->prepare) {
+ trace_clk_prepare(clk);
+
+ if (clk->ops->prepare)
ret = clk->ops->prepare(clk->hw);
- if (ret) {
- clk_core_unprepare(clk->parent);
- return ret;
- }
+
+ trace_clk_prepare_complete(clk);
+
+ if (ret) {
+ clk_core_unprepare(clk->parent);
+ return ret;
}
}
if (--clk->enable_count > 0)
return;
+ trace_clk_disable(clk);
+
if (clk->ops->disable)
clk->ops->disable(clk->hw);
+ trace_clk_disable_complete(clk);
+
clk_core_disable(clk->parent);
}
if (ret)
return ret;
- if (clk->ops->enable) {
+ trace_clk_enable(clk);
+
+ if (clk->ops->enable)
ret = clk->ops->enable(clk->hw);
- if (ret) {
- clk_core_disable(clk->parent);
- return ret;
- }
+
+ trace_clk_enable_complete(clk);
+
+ if (ret) {
+ clk_core_disable(clk->parent);
+ return ret;
}
}
struct clk_core *parent;
struct clk_hw *parent_hw;
+ lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
unsigned long parent_accuracy = 0;
struct clk_core *child;
+ lockdep_assert_held(&prepare_lock);
+
if (clk->parent)
parent_accuracy = clk->parent->accuracy;
unsigned long parent_rate = 0;
struct clk_core *child;
+ lockdep_assert_held(&prepare_lock);
+
old_rate = clk->rate;
if (clk->parent)
*/
if (clk->prepare_count) {
clk_core_prepare(parent);
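+ /* enable_count is protected by the enable spinlock, so take it around the core enable calls */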
+ flags = clk_enable_lock();
clk_core_enable(parent);
clk_core_enable(clk);
+ clk_enable_unlock(flags);
}
/* update the clk tree topology */
struct clk_core *parent,
struct clk_core *old_parent)
{
+ unsigned long flags;
+
/*
* Finish the migration of prepare state and undo the changes done
* for preventing a race with clk_enable().
*/
if (core->prepare_count) {
+ flags = clk_enable_lock();
clk_core_disable(core);
clk_core_disable(old_parent);
+ clk_enable_unlock(flags);
clk_core_unprepare(old_parent);
}
}
old_parent = __clk_set_parent_before(clk, parent);
+ trace_clk_set_parent(clk, parent);
+
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
+ trace_clk_set_parent_complete(clk, parent);
+
if (ret) {
flags = clk_enable_lock();
clk_reparent(clk, old_parent);
clk_enable_unlock(flags);
if (clk->prepare_count) {
+ flags = clk_enable_lock();
clk_core_disable(clk);
clk_core_disable(parent);
+ clk_enable_unlock(flags);
clk_core_unprepare(parent);
}
return ret;
unsigned long new_rate;
int ret = NOTIFY_DONE;
+ lockdep_assert_held(&prepare_lock);
+
new_rate = clk_recalc(clk, parent_rate);
/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
+ long ret;
/* sanity */
if (IS_ERR_OR_NULL(clk))
/* find the closest rate and parent clk/rate */
if (clk->ops->determine_rate) {
parent_hw = parent ? parent->hw : NULL;
- new_rate = clk->ops->determine_rate(clk->hw, rate,
- min_rate,
- max_rate,
- &best_parent_rate,
- &parent_hw);
+ ret = clk->ops->determine_rate(clk->hw, rate,
+ min_rate,
+ max_rate,
+ &best_parent_rate,
+ &parent_hw);
+ if (ret < 0)
+ return NULL;
+
+ new_rate = ret;
parent = parent_hw ? parent_hw->core : NULL;
} else if (clk->ops->round_rate) {
- new_rate = clk->ops->round_rate(clk->hw, rate,
- &best_parent_rate);
+ ret = clk->ops->round_rate(clk->hw, rate,
+ &best_parent_rate);
+ if (ret < 0)
+ return NULL;
+
+ new_rate = ret;
if (new_rate < min_rate || new_rate > max_rate)
return NULL;
} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
if (clk->new_parent && clk->new_parent != clk->parent) {
old_parent = __clk_set_parent_before(clk, clk->new_parent);
+ trace_clk_set_parent(clk, clk->new_parent);
if (clk->ops->set_rate_and_parent) {
skip_set_rate = true;
clk->ops->set_parent(clk->hw, clk->new_parent_index);
}
+ trace_clk_set_parent_complete(clk, clk->new_parent);
__clk_set_parent_after(clk, clk->new_parent, old_parent);
}
+ trace_clk_set_rate(clk, clk->new_rate);
+
if (!skip_set_rate && clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
+ trace_clk_set_rate_complete(clk, clk->new_rate);
+
clk->rate = clk_recalc(clk, best_parent_rate);
if (clk->notifier_count && old_rate != clk->rate)
if (!clk)
return 0;
- /* verify ops for for multi-parent clks */
- if ((clk->num_parents > 1) && (!clk->ops->set_parent))
- return -ENOSYS;
-
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (clk->parent == parent)
goto out;
+ /* verify ops for multi-parent clks */
+ if ((clk->num_parents > 1) && (!clk->ops->set_parent)) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
/* check that we are allowed to re-parent if the clock is in use */
if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
ret = -EBUSY;
*/
int clk_set_phase(struct clk *clk, int degrees)
{
- int ret = 0;
+ int ret = -EINVAL;
if (!clk)
- goto out;
+ return 0;
/* sanity check degrees */
degrees %= 360;
clk_prepare_lock();
- if (!clk->core->ops->set_phase)
- goto out_unlock;
+ trace_clk_set_phase(clk->core, degrees);
- ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+ if (clk->core->ops->set_phase)
+ ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+
+ trace_clk_set_phase_complete(clk->core, degrees);
if (!ret)
clk->core->phase = degrees;
-out_unlock:
clk_prepare_unlock();
-out:
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
clk->max_rate = ULONG_MAX;
clk_prepare_lock();
- hlist_add_head(&clk->child_node, &hw->core->clks);
+ hlist_add_head(&clk->clks_node, &hw->core->clks);
clk_prepare_unlock();
return clk;
void __clk_free_clk(struct clk *clk)
{
clk_prepare_lock();
- hlist_del(&clk->child_node);
+ hlist_del(&clk->clks_node);
clk_prepare_unlock();
kfree(clk);
struct clk_core *clk = container_of(ref, struct clk_core, ref);
int i = clk->num_parents;
+ lockdep_assert_held(&prepare_lock);
+
kfree(clk->parents);
while (--i >= 0)
kfree_const(clk->parent_names[i]);
clk_prepare_lock();
- hlist_del(&clk->child_node);
+ hlist_del(&clk->clks_node);
if (clk->min_rate > clk->core->req_rate ||
clk->max_rate < clk->core->req_rate)
clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
-/* of_clk_provider list locking helpers */
-void of_clk_lock(void)
-{
- mutex_lock(&of_clk_mutex);
-}
-
-void of_clk_unlock(void)
-{
- mutex_unlock(&of_clk_mutex);
-}
-
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data)
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
+ if (!clkspec)
+ return ERR_PTR(-EINVAL);
+
/* Check if we have such a provider in our array */
+ mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np)
clk = provider->get(clkspec, provider->data);
break;
}
}
+ mutex_unlock(&of_clk_mutex);
return clk;
}
+/**
+ * of_clk_get_from_provider() - Lookup a clock from a clock provider
+ * @clkspec: pointer to a clock specifier data structure
+ *
+ * This function looks up a struct clk from the registered list of clock
+ * providers; the input is a clock specifier data structure as returned
+ * from the of_parse_phandle_with_args() function call.
+ */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
- struct clk *clk;
-
- mutex_lock(&of_clk_mutex);
- clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
- mutex_unlock(&of_clk_mutex);
-
- return clk;
+ return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
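
For illustration, a minimal consumer-side sketch (not part of this patch; example_get_first_clk and the choice of index 0 are hypothetical) of resolving a clock through of_clk_get_from_provider() from a specifier parsed by of_parse_phandle_with_args():

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static struct clk *example_get_first_clk(struct device_node *np)
{
	struct of_phandle_args clkspec;
	struct clk *clk;
	int ret;

	/* parse the first entry of the "clocks" property into a specifier */
	ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
					 &clkspec);
	if (ret)
		return ERR_PTR(ret);

	/* look the specifier up in the registered provider list */
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	return clk;
}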
int of_clk_get_parent_count(struct device_node *np)