#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */
#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

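/*
 * __CS_LOOP implements a compare-and-swap update loop: the counter is
 * loaded, <op_string> is applied to a copy and CS retries until the
 * counter was replaced without interference.  The macro evaluates to
 * the new counter value.
 */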
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CS_LOOP(ptr, op_val, op_string) ({                            \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=Q" (((atomic_t *)(ptr))->counter)                   \
                : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)      \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#else /* __GNUC__ */

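/*
 * Variant for gcc 3.2 and older: instead of the "Q" memory constraint,
 * the counter address is passed in an address register and coded
 * explicitly as 0(%3).
 */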
#define __CS_LOOP(ptr, op_val, op_string) ({                            \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       l       %0,0(%3)\n"                             \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%4\n"                                \
                "       cs      %0,%1,0(%3)\n"                          \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=m" (((atomic_t *)(ptr))->counter)                   \
                : "a" (ptr), "d" (op_val),                              \
                  "m" (((atomic_t *)(ptr))->counter)                    \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#endif /* __GNUC__ */

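/*
 * Aligned 32-bit loads and stores are atomic on s390; barrier() only
 * keeps the compiler from reordering or caching the plain access.
 */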
static inline int atomic_read(const atomic_t *v)
{
        barrier();
        return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
        v->counter = i;
        barrier();
}

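/*
 * Add respectively subtract i under the CS loop and return the new
 * value; the remaining 32-bit helpers below are defined in terms of
 * atomic_add_return()/atomic_sub_return().
 */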
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
        return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add_return(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
        return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub_return(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)

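/*
 * atomic_clear_mask clears the bits set in mask (AND with ~mask),
 * atomic_set_mask sets them (OR with mask).
 */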
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
        __CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
        __CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

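/*
 * CS compares 'old' with the counter: if they match, 'new' is stored;
 * otherwise 'old' is updated with the current counter value.  Either
 * way the value found in memory is returned.
 */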
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
        asm volatile(
                "       cs      %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
#else /* __GNUC__ */
        asm volatile(
                "       cs      %0,%3,0(%2)"
                : "+d" (old), "=m" (v->counter)
                : "a" (v), "d" (new), "m" (v->counter)
                : "cc", "memory");
#endif /* __GNUC__ */
        return old;
}

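/*
 * Add 'a' to the counter unless it already holds 'u'.  Returns non-zero
 * if the old value was different from 'u', i.e. if the add was done.
 */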
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

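/*
 * __CSG_LOOP is the 64-bit counterpart of __CS_LOOP: it uses LG to
 * load and CSG to compare-and-swap the full 64-bit counter.
 */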
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CSG_LOOP(ptr, op_val, op_string) ({                           \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=Q" (((atomic64_t *)(ptr))->counter)                 \
                : "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)    \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({                           \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       lg      %0,0(%3)\n"                             \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%4\n"                                \
                "       csg     %0,%1,0(%3)\n"                          \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=m" (((atomic64_t *)(ptr))->counter)                 \
                : "a" (ptr), "d" (op_val),                              \
                  "m" (((atomic64_t *)(ptr))->counter)                  \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#endif /* __GNUC__ */

static inline long long atomic64_read(const atomic64_t *v)
{
        barrier();
        return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        v->counter = i;
        barrier();
}

static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
        return __CSG_LOOP(v, i, "agr");
}

static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
        return __CSG_LOOP(v, i, "sgr");
}

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
        __CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
        __CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
                                             long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
        asm volatile(
                "       csg     %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
#else /* __GNUC__ */
        asm volatile(
                "       csg     %0,%3,0(%2)"
                : "+d" (old), "=m" (v->counter)
                : "a" (v), "d" (new), "m" (v->counter)
                : "cc", "memory");
#endif /* __GNUC__ */
        return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

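/*
 * On a 31-bit kernel the 64-bit counter is accessed through an even/odd
 * register pair: LM/STM for read/set and CDS (compare double and swap)
 * for xchg/cmpxchg.  The arithmetic and mask operations further down
 * are built as cmpxchg retry loops.
 */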
typedef struct {
        long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
        register_pair rp;

        asm volatile(
                "       lm      %0,%N0,0(%1)"
                : "=&d" (rp)
                : "a" (&v->counter), "m" (v->counter)
                );
        return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        register_pair rp = {.pair = i};

        asm volatile(
                "       stm     %1,%N1,0(%2)"
                : "=m" (v->counter)
                : "d" (rp), "a" (&v->counter)
                );
}

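/*
 * Load the current pair with LM, then retry CDS until the doubleword
 * has been replaced; the previous value is returned.
 */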
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
        register_pair rp_new = {.pair = new};
        register_pair rp_old;

        asm volatile(
                "       lm      %0,%N0,0(%2)\n"
                "0:     cds     %0,%3,0(%2)\n"
                "       jl      0b\n"
                : "=&d" (rp_old), "+m" (v->counter)
                : "a" (&v->counter), "d" (rp_new)
                : "cc");
        return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                         long long old, long long new)
{
        register_pair rp_old = {.pair = old};
        register_pair rp_new = {.pair = new};

        asm volatile(
                "       cds     %0,%3,0(%2)"
                : "+&d" (rp_old), "+m" (v->counter)
                : "a" (&v->counter), "d" (rp_new)
                : "cc");
        return rp_old.pair;
}

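/*
 * The remaining 64-bit operations are read + cmpxchg retry loops on top
 * of atomic64_read()/atomic64_cmpxchg().
 */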
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old + i;
        } while (atomic64_cmpxchg(v, old, new) != old);
        return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old - i;
        } while (atomic64_cmpxchg(v, old, new) != old);
        return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old | mask;
        } while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old & ~mask;
        } while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

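/*
 * 64-bit counterpart of atomic_add_unless(): add 'a' unless the counter
 * already holds 'u'; returns non-zero if the add was performed.
 */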
static __inline__ int atomic64_add_unless(atomic64_t *v,
                                          long long a, long long u)
{
        long long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)

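/*
 * Barrier hooks used by generic code around atomic_inc()/atomic_dec();
 * they expand to full memory barriers here.
 */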
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */