/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/barrier.h>

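/*
 * Everything below is built from ARMv8 exclusive load/store pairs
 * (LDXR/STXR and their byte, halfword and pair variants): the exclusive
 * load marks the location for exclusive access, the matching store
 * succeeds only if no other observer has written the location in
 * between, and the returned status flag is checked so that the
 * sequence retries on failure.
 */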
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile("// __xchg1\n"
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 2:
		asm volatile("// __xchg2\n"
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 4:
		asm volatile("// __xchg4\n"
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "memory");
		break;
	case 8:
		asm volatile("// __xchg8\n"
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "memory");
		break;
	default:
		BUILD_BUG();
	}

	smp_mb();
	return ret;
}

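/*
 * Ordering note: the store-exclusive above has release semantics
 * (STLXR) and __xchg() issues smp_mb() after the loop, so a completed
 * exchange is fully ordered.
 */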
#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

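/*
 * Example (hypothetical caller, for illustration only; 'flag' is not
 * part of this file):
 *
 *	static unsigned long flag;
 *
 *	if (xchg(&flag, 1) == 0)
 *		// this CPU observed the flag clear and claimed it
 */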
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
			"	ldxrh	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrh	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;
	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
			"	ldxr	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;
	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
			"	ldxr	%1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;
	default:
		BUILD_BUG();
	}

	return oldval;
}

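/*
 * __cmpxchg() returns the value found at *ptr; the exchange happened
 * iff that value equals 'old'. It implies no memory barriers: callers
 * that need full ordering should use __cmpxchg_mb() below.
 */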
#define system_has_cmpxchg_double()	1

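/*
 * Double-word compare-and-exchange is always available on arm64 thanks
 * to the LDXP/STXP exclusive-pair instructions, hence the
 * unconditional 1 above.
 */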
static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
		unsigned long old1, unsigned long old2,
		unsigned long new1, unsigned long new2, int size)
{
	unsigned long loop, lost;

	switch (size) {
	case 8:
		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
		do {
			asm volatile("// __cmpxchg_double8\n"
			"	ldxp	%0, %1, %2\n"
			"	eor	%0, %0, %3\n"
			"	eor	%1, %1, %4\n"
			"	orr	%1, %0, %1\n"
			"	mov	%w0, #0\n"
			"	cbnz	%1, 1f\n"
			"	stxp	%w0, %5, %6, %2\n"
			"1:\n"
			: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
			: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
		} while (loop);
		break;
	default:
		BUILD_BUG();
	}

	return !lost;
}

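/*
 * Each word is compared by XOR-ing it with its expected value and the
 * results are OR-ed together, so a non-zero 'lost' means the comparison
 * failed and the function returns non-zero only on success. The
 * VM_BUG_ON() above enforces that ptr2 names the 64-bit word
 * immediately after ptr1, since a single STXP updates both.
 */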
static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
			unsigned long old1, unsigned long old2,
			unsigned long new1, unsigned long new2, int size)
{
	int ret;

	smp_mb();
	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
	smp_mb();

	return ret;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

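/*
 * Example (hypothetical caller, for illustration only; 'counter' is
 * not part of this file): lock-free increment built on cmpxchg().
 *
 *	static unsigned long counter;
 *
 *	unsigned long old, cur = counter;
 *
 *	do {
 *		old = cur;
 *		cur = cmpxchg(&counter, old, old + 1);
 *	} while (cur != old);
 */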
#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
			(unsigned long)(o2), (unsigned long)(n1), \
			(unsigned long)(n2), sizeof(*(ptr1)));\
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
			(unsigned long)(o2), (unsigned long)(n1), \
			(unsigned long)(n2), sizeof(*(ptr1)));\
	__ret; \
})

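/*
 * Example (hypothetical caller, for illustration only; 'slot' and its
 * fields are not part of this file): atomically replace a pointer
 * together with its generation tag, the two fields being adjacent and
 * doubleword-aligned.
 *
 *	static struct {
 *		void *p;
 *		unsigned long gen;
 *	} slot __aligned(16);
 *
 *	if (cmpxchg_double(&slot.p, &slot.gen,
 *			   old_p, old_gen, new_p, new_gen))
 *		// both words were updated atomically
 */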
#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
	cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
				o1, o2, n1, n2)

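/*
 * The this_cpu_cmpxchg*() helpers resolve to the barrier-less _local
 * variants: per-CPU data is not written concurrently by other CPUs, so
 * no cross-CPU ordering is required.
 */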
#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

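/*
 * On a 64-bit architecture cmpxchg() already handles 64-bit
 * quantities, so the cmpxchg64*() forms map directly onto their
 * generic counterparts; the _relaxed form reuses cmpxchg_local(),
 * which is the variant without barriers.
 */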
#endif	/* __ASM_CMPXCHG_H */