#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _spin_lock(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc
_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)	__acquires(lock);

unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc
_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _spin_trylock(raw_spinlock_t *lock);
int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _spin_unlock(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);
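
/*
 * Illustrative usage (a sketch, not part of this header; `my_lock` is
 * a hypothetical raw_spinlock_t): callers normally reach these entry
 * points through the spin_lock_irqsave()/spin_unlock_irqrestore()
 * wrappers in <linux/spinlock.h>. The flags word returned by the
 * irqsave variant must be handed back unmodified on unlock:
 *
 *	unsigned long flags;
 *
 *	flags = _spin_lock_irqsave(&my_lock);
 *	... critical section, with local interrupts disabled ...
 *	_spin_unlock_irqrestore(&my_lock, flags);
 */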

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
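
/*
 * Each CONFIG_INLINE_SPIN_* option above redirects the matching
 * out-of-line entry point in kernel/spinlock.c to the __spin_*()
 * inline below, trading kernel text size for lower call overhead.
 * With CONFIG_INLINE_SPIN_LOCK=y, for instance, a _spin_lock(lock)
 * call expands directly to the body of __spin_lock(lock).
 */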

static inline int __spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
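
/*
 * Illustrative trylock pattern (a sketch; `my_lock` is a hypothetical
 * raw_spinlock_t): a nonzero return means the lock is held, with
 * preemption left disabled until the matching unlock; a zero return
 * means nothing is held and no unlock must follow:
 *
 *	if (_spin_trylock(&my_lock)) {
 *		... short critical section ...
 *		_spin_unlock(&my_lock);
 *	} else {
 *		... contended: fall back without spinning ...
 *	}
 */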

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
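
/*
 * LOCK_CONTENDED() above comes from <linux/lockdep.h>. With
 * CONFIG_LOCK_STAT it first attempts the trylock and, on failure,
 * records the contention before acquiring for real; without lock
 * statistics it collapses to a plain _raw_spin_lock(lock). Roughly,
 * the instrumented form behaves like this sketch:
 *
 *	if (!_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 */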

static inline void __spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
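
/*
 * Illustrative bh-lock pairing (a sketch; `my_lock` is a hypothetical
 * raw_spinlock_t) for data shared with softirq context:
 *
 *	_spin_lock_bh(&my_lock);
 *	... update state also touched from a tasklet or softirq ...
 *	_spin_unlock_bh(&my_lock);
 *
 * The unlock path above uses preempt_enable_no_resched() because the
 * local_bh_enable_ip() call that follows performs its own preemption
 * check, so rescheduling is deferred until softirqs are re-enabled.
 */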

static inline int __spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
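
/*
 * Unlike _spin_trylock(), the _bh variant above disables softirqs up
 * front and rolls that back on failure, so the caller sees consistent
 * state either way (a sketch with a hypothetical `my_lock`):
 *
 *	if (_spin_trylock_bh(&my_lock)) {
 *		... critical section ...
 *		_spin_unlock_bh(&my_lock);
 *	}
 */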

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */