/*
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#define CREATE_TRACE_POINTS
#include <linux/smp_lock.h>
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
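/*
 * Illustrative sketch of how legacy code typically uses the BKL: the lock
 * may be taken recursively, and it is silently dropped whenever the holder
 * sleeps. The names my_legacy_ioctl() and my_legacy_do_ioctl() below are
 * hypothetical, used only to show the calling pattern.
 */
#if 0	/* example only, never compiled */
static long my_legacy_ioctl(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	long ret;

	lock_kernel();		/* recursive: nested calls only bump lock_depth */
	ret = my_legacy_do_ioctl(filp, cmd, arg);	/* may sleep */
	unlock_kernel();

	return ret;
}
#endif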
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}
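/*
 * Rough sketch of the intended caller: the scheduler core drops the BKL
 * before switching away and takes it back afterwards, going through the
 * lock_depth-checking wrappers in <linux/smp_lock.h>. The function below
 * is hypothetical and heavily simplified; it only shows the calling
 * pattern implied by the two helpers above.
 */
#if 0	/* example only, never compiled */
static void example_schedule_bkl_handling(void)
{
	/* On the way into schedule(): drop the BKL if this task holds it. */
	if (unlikely(current->lock_depth >= 0))
		__release_kernel_lock();

	/* ... pick the next task and context-switch ... */

	/*
	 * On the way out: reacquire it. A negative return means the lock
	 * could not be taken before TIF_NEED_RESCHED was set again, so the
	 * real scheduler loops back and reschedules instead of spinning.
	 */
	if (unlikely(current->lock_depth >= 0 &&
		     __reacquire_kernel_lock() < 0))
		; /* retry the schedule loop */
}
#endif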
void __lockfunc __release_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}

#else
/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif
static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPU's.
 */
void __lockfunc _lock_kernel(void)
{
	int depth = current->lock_depth+1;

	if (likely(!depth))
		__lock_kernel();

	current->lock_depth = depth;
}
void __lockfunc _unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();
}
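/*
 * Note on the depth counting above: current->lock_depth is -1 for a task
 * that does not hold the BKL. The outermost lock_kernel() sees the depth
 * go from -1 to 0 and takes the spinlock; nested calls only bump the
 * counter. Symmetrically, only the unlock_kernel() that pushes lock_depth
 * back below 0 releases the spinlock, which is what makes the BKL safely
 * recursive for the legacy code described at the top of this file.
 */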
EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);