static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
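+/*
+ * LOCK()/UNLOCK() wrap clockfw_lock: warn if called from hard-IRQ
+ * context, and take the lock with spin_lock_bh() only while interrupts
+ * are still enabled.  When IRQs are already disabled the lock is skipped
+ * entirely, presumably because spin_unlock_bh() may run softirqs, which
+ * must not happen with interrupts off.
+ */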
+#define LOCK() do { WARN_ON(in_irq()); if (!irqs_disabled()) spin_lock_bh(&clockfw_lock); } while (0)
+#define UNLOCK() do { if (!irqs_disabled()) spin_unlock_bh(&clockfw_lock); } while (0)
static int __clk_enable(struct clk *clk)
{
if (clk == NULL || IS_ERR(clk))
return -EINVAL;
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
ret = __clk_enable(clk);
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
return ret;
}
if (clk == NULL || IS_ERR(clk))
return;
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
if (clk->usecount == 0) {
printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n", clk->name);
WARN_ON(1);
goto out;
}
__clk_disable(clk);
out:
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
}
EXPORT_SYMBOL(clk_disable);
if (clk == NULL || IS_ERR(clk))
return 0;
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
ret = clk->rate;
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
return ret;
}
if (clk == NULL || IS_ERR(clk))
return ret;
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
ret = __clk_round_rate(clk, rate);
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
return ret;
}
if (clk == NULL || IS_ERR(clk))
return ret;
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
if (rate == clk->rate) {
ret = 0;
goto out;
}
propagate_rate(clk);
}
out:
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
return ret;
}
if (clk->set_parent == NULL)
return ret;
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
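+ /* Reparenting is only allowed while the clock is unused. */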
if (clk->usecount == 0) {
ret = clk->set_parent(clk, parent);
if (ret == 0) {
}
} else
ret = -EBUSY;
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
return ret;
}
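+/* Recalculate the root clocks with clockfw_lock held; must not be called from hard-IRQ context. */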
void clk_recalculate_root_clocks(void)
{
- WARN_ON(in_irq());
- spin_lock_bh(&clockfw_lock);
+ LOCK();
recalculate_root_clocks();
- spin_unlock_bh(&clockfw_lock);
+ UNLOCK();
}
/**