s390/rwlock: use directed yield for write-locked rwlocks
arch/s390/include/asm/spinlock.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

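/*
 * Atomically compare the lock word with "old" and, if they match, replace
 * it with "new" using the compare-and-swap (cs) instruction. Returns 1 if
 * the swap was performed, 0 if another CPU changed the lock word first.
 */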
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
        unsigned int old_expected = old;

        asm volatile(
                "       cs      %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return old == old_expected;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

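/*
 * Each CPU's lock value is the one's complement of its number, so that
 * CPU 0 does not collide with the value 0, which means "unlocked".
 */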
static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return ACCESS_ONCE(lp->lock) != 0;
}

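/*
 * Fast path: try to move the lock word from 0 (free) to this CPU's lock
 * value with a single compare-and-swap. The barrier() only keeps the
 * compiler from reordering memory accesses around the attempt.
 */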
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

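/*
 * Release the lock: the barrier orders all critical-section accesses
 * ahead of the store that sets the lock word back to 0 (unlocked).
 */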
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(unsigned int, lp->lock);
        asm volatile(
                __ASM_BARRIER
                "st     %1,%0\n"
                : "+Q" (lp->lock)
                : "d" (0)
                : "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

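/*
 * Lock word layout: 0 means unlocked, the most significant bit
 * (0x80000000) marks a write lock, and the lower bits count the
 * current readers.
 */
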
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely((int) old >= 0 &&
                      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

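/*
 * The write-lock paths record the holder's lock value in rw->owner so
 * that contended waiters can later yield directly to the write-lock
 * holder; the field is cleared again in arch_write_unlock().
 */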
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait_flags(rw, flags);
        rw->owner = SPINLOCK_LOCKVAL;
}

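/*
 * rw->owner is cleared before the lock word is released so that a new
 * writer's owner value is not overwritten after it has already taken
 * the lock.
 */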
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(unsigned int, rw->lock);

        rw->owner = 0;
        asm volatile(
                __ASM_BARRIER
                "st     %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

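/*
 * Both relax helpers hand the recorded owner to arch_lock_relax(), which
 * can direct a yield to the CPU holding the write lock instead of plain
 * busy waiting; the actual yield mechanism lives in the out-of-line
 * arch_lock_relax() implementation.
 */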
static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */