1 #ifndef __LINUX_SPINLOCK_API_SMP_H
2 #define __LINUX_SPINLOCK_API_SMP_H
4 #ifndef __LINUX_SPINLOCK_H
5 # error "please don't include this file directly"
9 * include/linux/spinlock_api_smp.h
11 * spinlock API declarations on SMP (and debug)
12 * (implemented in kernel/spinlock.c)
14 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
15 * Released under the General Public License (GPL).
/*
 * Returns nonzero when 'addr' falls inside the kernel's lock-function
 * text (used by profilers/backtracers to skip locking internals).
 */
18 int in_lock_functions(unsigned long addr);

/* Debug assertion: BUG() if the raw spinlock is not currently held. */
20 #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
/*
 * Out-of-line SMP spinlock API, implemented in kernel/spinlock.c.
 * The sparse annotations (__acquires/__releases) document the lock
 * context transition of each entry point.
 *
 * NOTE(review): this listing appears truncated — several declarations
 * below are missing continuation lines (return types for the *_nested /
 * *_nest_lock / *_irqrestore variants and their __acquires/__releases
 * annotations).  Verify against the complete header before editing.
 */
22 void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
26 _spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
28 void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock);
31 unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
33 unsigned long __lockfunc
34 _spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
36 int __lockfunc _spin_trylock(raw_spinlock_t *lock);
37 int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
38 void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock);
39 void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
40 void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
42 _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
/*
 * When the corresponding CONFIG_INLINE_SPIN_* option is set, each
 * out-of-line _spin_*() entry point is aliased to its inline
 * __spin_*() implementation below, avoiding a function call on
 * architectures/configs where inlining the lock ops is a win.
 *
 * NOTE(review): the matching #endif line for each #ifdef is not
 * visible in this listing — it appears truncated.
 */
45 #ifdef CONFIG_INLINE_SPIN_LOCK
46 #define _spin_lock(lock) __spin_lock(lock)
49 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
50 #define _spin_lock_bh(lock) __spin_lock_bh(lock)
53 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
54 #define _spin_lock_irq(lock) __spin_lock_irq(lock)
57 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
58 #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
61 #ifdef CONFIG_INLINE_SPIN_TRYLOCK
62 #define _spin_trylock(lock) __spin_trylock(lock)
65 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
66 #define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
69 #ifdef CONFIG_INLINE_SPIN_UNLOCK
70 #define _spin_unlock(lock) __spin_unlock(lock)
73 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
74 #define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
77 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
78 #define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
81 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
82 #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
/*
 * Inline trylock: attempt to take the raw spinlock without spinning.
 * On success, tell lockdep the lock was taken via trylock (the third
 * argument '1' to spin_acquire()).
 * NOTE(review): the body is visibly incomplete in this listing — the
 * opening brace, preempt handling and return paths are missing.
 */
85 static inline int __spin_trylock(raw_spinlock_t *lock)
88 if (do_raw_spin_trylock(lock)) {
89 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
97 * If lockdep is enabled then we use the non-preemption spin-ops
98 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
99 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
101 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * Inline irqsave lock: save the current IRQ state and disable local
 * interrupts, then acquire the raw spinlock.  Under CONFIG_LOCKDEP the
 * generic LOCK_CONTENDED() path is used instead of
 * do_raw_spin_lock_flags(), because lockdep assumes interrupts are not
 * re-enabled during lock-acquire (see the comment inside the body).
 * NOTE(review): listing appears truncated — the declaration of
 * 'flags', the preempt handling, the #else/#endif of the LOCKDEP
 * branch and the 'return flags;' tail are not visible here.
 */
103 static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
107 local_irq_save(flags);
109 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
111 * On lockdep we dont want the hand-coded irq-enable of
112 * do_raw_spin_lock_flags() code, because lockdep assumes
113 * that interrupts are not re-enabled during lock-acquire:
115 #ifdef CONFIG_LOCKDEP
116 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
118 do_raw_spin_lock_flags(lock, &flags);
/*
 * Inline lock_irq: acquire the raw spinlock with local interrupts
 * disabled (no flags saved — caller must know IRQs were enabled).
 * NOTE(review): the local_irq_disable()/preempt handling and braces
 * are not visible — the listing appears truncated.
 */
123 static inline void __spin_lock_irq(raw_spinlock_t *lock)
127 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
128 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
/*
 * Inline lock_bh: acquire the raw spinlock with bottom halves
 * (softirqs) disabled.
 * NOTE(review): the local_bh_disable() call and braces are not
 * visible — the listing appears truncated.
 */
131 static inline void __spin_lock_bh(raw_spinlock_t *lock)
135 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
136 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
/*
 * Inline plain lock: acquire the raw spinlock (lockdep acquire first,
 * then the contended slow/fast path via LOCK_CONTENDED()).
 * NOTE(review): the preempt_disable() call and braces are not
 * visible — the listing appears truncated.
 */
139 static inline void __spin_lock(raw_spinlock_t *lock)
142 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
143 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
146 #endif /* CONFIG_PREEMPT */
/*
 * Inline plain unlock: notify lockdep of the release, then drop the
 * raw spinlock.
 * NOTE(review): the preempt_enable() tail and braces are not
 * visible — the listing appears truncated.
 */
148 static inline void __spin_unlock(raw_spinlock_t *lock)
150 spin_release(&lock->dep_map, 1, _RET_IP_);
151 do_raw_spin_unlock(lock);
/*
 * Inline irqrestore unlock: release the raw spinlock, then restore the
 * IRQ state previously saved by __spin_lock_irqsave() into 'flags'.
 * Order matters: the lock is dropped before interrupts are restored.
 * NOTE(review): braces and the preempt_enable() tail are not visible —
 * the listing appears truncated.
 */
155 static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
158 spin_release(&lock->dep_map, 1, _RET_IP_);
159 do_raw_spin_unlock(lock);
160 local_irq_restore(flags);
/*
 * Inline unlock_irq: release the raw spinlock, counterpart of
 * __spin_lock_irq().
 * NOTE(review): the local_irq_enable()/preempt_enable() tail and
 * braces are not visible — the listing appears truncated.
 */
164 static inline void __spin_unlock_irq(raw_spinlock_t *lock)
166 spin_release(&lock->dep_map, 1, _RET_IP_);
167 do_raw_spin_unlock(lock);
/*
 * Inline unlock_bh: release the raw spinlock, then re-enable bottom
 * halves.  preempt_enable_no_resched() is used because
 * local_bh_enable_ip() performs its own resched check; the caller's
 * return address is passed so softirq accounting is attributed to the
 * real call site rather than this inline helper.
 * NOTE(review): braces are not visible — the listing appears truncated.
 */
172 static inline void __spin_unlock_bh(raw_spinlock_t *lock)
174 spin_release(&lock->dep_map, 1, _RET_IP_);
175 do_raw_spin_unlock(lock);
176 preempt_enable_no_resched();
177 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
/*
 * Inline trylock_bh: attempt the lock with bottom halves disabled; on
 * success lockdep records a trylock acquire.  The visible
 * preempt_enable_no_resched()/local_bh_enable_ip() pair is the failure
 * path, undoing the BH/preempt disable before returning.
 * NOTE(review): the body is visibly incomplete in this listing — the
 * local_bh_disable() prologue, both return statements and the braces
 * are missing.
 */
180 static inline int __spin_trylock_bh(raw_spinlock_t *lock)
184 if (do_raw_spin_trylock(lock)) {
185 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
188 preempt_enable_no_resched();
189 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
193 #include <linux/rwlock_api_smp.h>
195 #endif /* __LINUX_SPINLOCK_API_SMP_H */