x86: this_cpu_cmpxchg and this_cpu_xchg operations
[firefly-linux-kernel-4.4.55.git] arch/x86/include/asm/percpu.h
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg            gs
#define __percpu_mov_op         movq
#else
#define __percpu_seg            fs
#define __percpu_mov_op         movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU computes the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op %__percpu_seg:this_cpu_off, reg;                \
        lea var(reg), reg
#define PER_CPU_VAR(var)        %__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)       __percpu_mov_op $var, reg
#define PER_CPU_VAR(var)        var
#endif  /* SMP */
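
/*
 * Illustrative sketches of use from an assembly file ("my_counter" is
 * a hypothetical per-cpu variable, not part of this header):
 *
 *    incl PER_CPU_VAR(my_counter)      # bump this CPU's copy in place
 *    PER_CPU(my_counter, %eax)         # %eax = &this CPU's my_counter
 */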

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)         "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset         percpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __this_cpu_ptr(ptr)                             \
({                                                      \
        unsigned long tcp_ptr__;                        \
        __verify_pcpu_ptr(ptr);                         \
        asm volatile("add " __percpu_arg(1) ", %0"      \
                     : "=r" (tcp_ptr__)                 \
                     : "m" (this_cpu_off), "0" (ptr));  \
        (typeof(*(ptr)) __kernel __force *)tcp_ptr__;   \
})
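
/*
 * Usage sketch ("demo_stat" is a hypothetical per-cpu variable):
 *
 *    DEFINE_PER_CPU(struct demo, demo_stat);
 *    struct demo *ds = __this_cpu_ptr(&demo_stat);
 *
 * This compiles to a single "add %gs:this_cpu_off, <reg>" on top of
 * the pointer (%fs on 32-bit), rather than loading the offset into a
 * temporary register first.
 */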
#else
#define __percpu_arg(x)         "%P" #x
#endif

/*
 * Initialized pointers to per-cpu variables needed by the boot
 * processor must use these macros to obtain the proper address,
 * offset from __per_cpu_load, on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't yield an lvalue, though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)                      \
do {                                                    \
        typedef typeof(var) pto_T__;                    \
        if (0) {                                        \
                /* type-check val against var; dead code, compiled away */ \
                pto_T__ pto_tmp__;                      \
                pto_tmp__ = (val);                      \
                (void)pto_tmp__;                        \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "qi" ((pto_T__)(val)));           \
                break;                                  \
        case 2:                                         \
                asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((pto_T__)(val)));           \
                break;                                  \
        case 4:                                         \
                asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((pto_T__)(val)));           \
                break;                                  \
        case 8:                                         \
                asm(op "q %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "re" ((pto_T__)(val)));           \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
} while (0)
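
/*
 * Expansion sketch (hypothetical 4-byte per-cpu variable "x"):
 *
 *    percpu_to_op("mov", x, 5);    ->    movl $5, %gs:x
 *
 * (%fs instead of %gs on 32-bit).
 */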

/*
 * Generate a per-cpu add-to-memory instruction, optimizing the code
 * to inc/dec when 1 or -1 is added or subtracted.
 */
#define percpu_add_op(var, val)                                         \
do {                                                                    \
        typedef typeof(var) pao_T__;                                    \
        /* pao_ID__ is 1 or -1 for a constant val of 1 or -1, else 0 */ \
        const int pao_ID__ = (__builtin_constant_p(val) &&              \
                              ((val) == 1 || (val) == -1)) ? (val) : 0; \
        if (0) {                                                        \
                /* type-check val against var; compiled away */         \
                pao_T__ pao_tmp__;                                      \
                pao_tmp__ = (val);                                      \
                (void)pao_tmp__;                                        \
        }                                                               \
        switch (sizeof(var)) {                                          \
        case 1:                                                         \
                if (pao_ID__ == 1)                                      \
                        asm("incb "__percpu_arg(0) : "+m" (var));       \
                else if (pao_ID__ == -1)                                \
                        asm("decb "__percpu_arg(0) : "+m" (var));       \
                else                                                    \
                        asm("addb %1, "__percpu_arg(0)                  \
                            : "+m" (var)                                \
                            : "qi" ((pao_T__)(val)));                   \
                break;                                                  \
        case 2:                                                         \
                if (pao_ID__ == 1)                                      \
                        asm("incw "__percpu_arg(0) : "+m" (var));       \
                else if (pao_ID__ == -1)                                \
                        asm("decw "__percpu_arg(0) : "+m" (var));       \
                else                                                    \
                        asm("addw %1, "__percpu_arg(0)                  \
                            : "+m" (var)                                \
                            : "ri" ((pao_T__)(val)));                   \
                break;                                                  \
        case 4:                                                         \
                if (pao_ID__ == 1)                                      \
                        asm("incl "__percpu_arg(0) : "+m" (var));       \
                else if (pao_ID__ == -1)                                \
                        asm("decl "__percpu_arg(0) : "+m" (var));       \
                else                                                    \
                        asm("addl %1, "__percpu_arg(0)                  \
                            : "+m" (var)                                \
                            : "ri" ((pao_T__)(val)));                   \
                break;                                                  \
        case 8:                                                         \
                if (pao_ID__ == 1)                                      \
                        asm("incq "__percpu_arg(0) : "+m" (var));       \
                else if (pao_ID__ == -1)                                \
                        asm("decq "__percpu_arg(0) : "+m" (var));       \
                else                                                    \
                        asm("addq %1, "__percpu_arg(0)                  \
                            : "+m" (var)                                \
                            : "re" ((pao_T__)(val)));                   \
                break;                                                  \
        default: __bad_percpu_size();                                   \
        }                                                               \
} while (0)
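
/*
 * Expansion sketch (hypothetical 4-byte per-cpu "nr_events"):
 *
 *    percpu_add_op(nr_events, 1);     ->    incl %gs:nr_events
 *    percpu_add_op(nr_events, -1);    ->    decl %gs:nr_events
 *    percpu_add_op(nr_events, 7);     ->    addl $7, %gs:nr_events
 */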

#define percpu_from_op(op, var, constraint)             \
({                                                      \
        typeof(var) pfo_ret__;                          \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_arg(1)",%0"         \
                    : "=q" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        case 2:                                         \
                asm(op "w "__percpu_arg(1)",%0"         \
                    : "=r" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        case 4:                                         \
                asm(op "l "__percpu_arg(1)",%0"         \
                    : "=r" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        case 8:                                         \
                asm(op "q "__percpu_arg(1)",%0"         \
                    : "=r" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
        pfo_ret__;                                      \
})
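
/*
 * Usage sketch: percpu_from_op("mov", x, "m" (x)) loads this CPU's
 * copy of a hypothetical "x" with a single "mov %gs:x, <reg>"; the
 * percpu_read() and this_cpu_read_[124]() wrappers below build on it.
 */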

#define percpu_unary_op(op, var)                        \
({                                                      \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_arg(0)              \
                    : "+m" (var));                      \
                break;                                  \
        case 2:                                         \
                asm(op "w "__percpu_arg(0)              \
                    : "+m" (var));                      \
                break;                                  \
        case 4:                                         \
                asm(op "l "__percpu_arg(0)              \
                    : "+m" (var));                      \
                break;                                  \
        case 8:                                         \
                asm(op "q "__percpu_arg(0)              \
                    : "+m" (var));                      \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
})

/*
 * Add-and-return operation: xadd leaves the old value in the register,
 * so we add val once more to return the new value.
 */
#define percpu_add_return_op(var, val)                                  \
({                                                                      \
        typeof(var) paro_ret__ = val;                                   \
        switch (sizeof(var)) {                                          \
        case 1:                                                         \
                asm("xaddb %0, "__percpu_arg(1)                         \
                            : "+q" (paro_ret__), "+m" (var)             \
                            : : "memory");                              \
                break;                                                  \
        case 2:                                                         \
                asm("xaddw %0, "__percpu_arg(1)                         \
                            : "+r" (paro_ret__), "+m" (var)             \
                            : : "memory");                              \
                break;                                                  \
        case 4:                                                         \
                asm("xaddl %0, "__percpu_arg(1)                         \
                            : "+r" (paro_ret__), "+m" (var)             \
                            : : "memory");                              \
                break;                                                  \
        case 8:                                                         \
                asm("xaddq %0, "__percpu_arg(1)                         \
                            : "+re" (paro_ret__), "+m" (var)            \
                            : : "memory");                              \
                break;                                                  \
        default: __bad_percpu_size();                                   \
        }                                                               \
        paro_ret__ += val;                                              \
        paro_ret__;                                                     \
})
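
/*
 * Usage sketch (hypothetical per-cpu "cnt"):
 *
 *    int new = percpu_add_return_op(cnt, 3);   // cnt += 3; returns new cnt
 */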

/*
 * Beware: xchg on x86 has an implied lock prefix, so the cost of full
 * lock semantics is paid even though it is not needed here.
 */
#define percpu_xchg_op(var, nval)                                       \
({                                                                      \
        typeof(var) pxo_ret__;                                          \
        typeof(var) pxo_new__ = (nval);                                 \
        /*                                                              \
         * xchg writes the old memory value into its register operand, \
         * so tie the new value to the output register with "0".       \
         */                                                             \
        switch (sizeof(var)) {                                          \
        case 1:                                                         \
                asm("xchgb %0, "__percpu_arg(1)                         \
                            : "=q" (pxo_ret__), "+m" (var)              \
                            : "0" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        case 2:                                                         \
                asm("xchgw %0, "__percpu_arg(1)                         \
                            : "=r" (pxo_ret__), "+m" (var)              \
                            : "0" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        case 4:                                                         \
                asm("xchgl %0, "__percpu_arg(1)                         \
                            : "=r" (pxo_ret__), "+m" (var)              \
                            : "0" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        case 8:                                                         \
                asm("xchgq %0, "__percpu_arg(1)                         \
                            : "=r" (pxo_ret__), "+m" (var)              \
                            : "0" (pxo_new__)                           \
                            : "memory");                                \
                break;                                                  \
        default: __bad_percpu_size();                                   \
        }                                                               \
        pxo_ret__;                                                      \
})
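
/*
 * Usage sketch (hypothetical per-cpu "pending_state"): swap in a new
 * value and retrieve the old one in a single instruction:
 *
 *    int old = percpu_xchg_op(pending_state, 0);
 */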

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)                              \
({                                                                      \
        typeof(var) pco_ret__;                                          \
        typeof(var) pco_old__ = (oval);                                 \
        typeof(var) pco_new__ = (nval);                                 \
        /*                                                              \
         * cmpxchg compares the accumulator with memory, so pco_old__  \
         * goes in via "0" (%al/%ax/%eax/%rax) and the value found in   \
         * memory comes back out the same way.                          \
         */                                                             \
        switch (sizeof(var)) {                                          \
        case 1:                                                         \
                asm("cmpxchgb %2, "__percpu_arg(1)                      \
                            : "=a" (pco_ret__), "+m" (var)              \
                            : "q" (pco_new__), "0" (pco_old__)          \
                            : "memory");                                \
                break;                                                  \
        case 2:                                                         \
                asm("cmpxchgw %2, "__percpu_arg(1)                      \
                            : "=a" (pco_ret__), "+m" (var)              \
                            : "r" (pco_new__), "0" (pco_old__)          \
                            : "memory");                                \
                break;                                                  \
        case 4:                                                         \
                asm("cmpxchgl %2, "__percpu_arg(1)                      \
                            : "=a" (pco_ret__), "+m" (var)              \
                            : "r" (pco_new__), "0" (pco_old__)          \
                            : "memory");                                \
                break;                                                  \
        case 8:                                                         \
                asm("cmpxchgq %2, "__percpu_arg(1)                      \
                            : "=a" (pco_ret__), "+m" (var)              \
                            : "r" (pco_new__), "0" (pco_old__)          \
                            : "memory");                                \
                break;                                                  \
        default: __bad_percpu_size();                                   \
        }                                                               \
        pco_ret__;                                                      \
})
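
/*
 * Usage sketch (hypothetical per-cpu "val"): the classic retry loop,
 * succeeding only if this CPU's copy was not changed in between:
 *
 *    int old, new;
 *    do {
 *            old = percpu_read(val);
 *            new = old + 1;
 *    } while (percpu_cmpxchg_op(val, old, new) != old);
 */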

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)                percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)         percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)          percpu_to_op("mov", var, val)
#define percpu_add(var, val)            percpu_add_op(var, val)
#define percpu_sub(var, val)            percpu_add_op(var, -(val))
#define percpu_and(var, val)            percpu_to_op("and", var, val)
#define percpu_or(var, val)             percpu_to_op("or", var, val)
#define percpu_xor(var, val)            percpu_to_op("xor", var, val)
#define percpu_inc(var)         percpu_unary_op("inc", var)
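
/*
 * Example (actual use elsewhere in arch/x86, see asm/current.h):
 * get_current() reads the per-cpu current_task pointer with the
 * "stable" variant, since it cannot change under the running task:
 *
 *    static __always_inline struct task_struct *get_current(void)
 *    {
 *            return percpu_read_stable(current_task);
 *    }
 */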

#define __this_cpu_read_1(pcp)          percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)          percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)          percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)    percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)    percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)    percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)      percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)      percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)      percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)      percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)      percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)      percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)       percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)       percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)       percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)      percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)      percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)      percpu_to_op("xor", (pcp), val)
/*
 * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
 * faster than an xchg with forced lock semantics.
 */
#define __this_cpu_xchg_8(pcp, nval)    percpu_xchg_op(pcp, nval)
#define __this_cpu_cmpxchg_8(pcp, oval, nval)   percpu_cmpxchg_op(pcp, oval, nval)
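
/*
 * For reference, the generic fallback used for sizes 1-4 is roughly
 * (sketch, simplified from include/linux/percpu.h):
 *
 *    typeof(pcp) old__ = __this_cpu_read(pcp);
 *    __this_cpu_write(pcp, nval);
 *    // old__ is the result
 *
 * i.e. two cheap moves instead of one implicitly locked xchg.
 */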

#define this_cpu_read_1(pcp)            percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)            percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)            percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)      percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)      percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)      percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)        percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)        percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)        percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)        percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)        percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)        percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)         percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)         percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)         percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)        percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)        percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)        percpu_to_op("xor", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)      percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)      percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)      percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_8(pcp, nval)      percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)     percpu_cmpxchg_op(pcp, oval, nval)

#define irqsafe_cpu_add_1(pcp, val)     percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)     percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)     percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)     percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)     percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)     percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)      percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)      percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)      percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)     percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)     percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)     percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_1(pcp, nval)   percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_2(pcp, nval)   percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_4(pcp, nval)   percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_8(pcp, nval)   percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)

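/*
 * xadd and cmpxchg first appeared on the 486, so these single
 * instruction versions cannot be offered when building for a 386.
 */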
#ifndef CONFIG_M386
#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
#define __this_cpu_cmpxchg_1(pcp, oval, nval)   percpu_cmpxchg_op(pcp, oval, nval)
#define __this_cpu_cmpxchg_2(pcp, oval, nval)   percpu_cmpxchg_op(pcp, oval, nval)
#define __this_cpu_cmpxchg_4(pcp, oval, nval)   percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)     percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)     percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)     percpu_cmpxchg_op(pcp, oval, nval)

#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */

/*
 * Per-cpu atomic 64-bit operations are only available on 64-bit x86;
 * 32-bit must fall back to the generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)          percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)    percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)      percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)      percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)       percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)      percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)

#define this_cpu_read_8(pcp)            percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)      percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)        percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)        percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)         percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)        percpu_to_op("xor", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)

#define irqsafe_cpu_add_8(pcp, val)     percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)     percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)      percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)     percpu_to_op("xor", (pcp), val)
#endif /* CONFIG_X86_64 */

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)                         \
({                                                                      \
        int old__;                                                      \
        /* btr sets CF to the old bit; sbb turns CF into 0 or -1 */     \
        asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"           \
                     : "=r" (old__), "+m" (var)                         \
                     : "dIr" (bit));                                    \
        old__;                                                          \
})
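
/*
 * Usage sketch (hypothetical per-cpu bitmask "pending"), with
 * preemption already disabled by the caller:
 *
 *    if (x86_test_and_clear_bit_percpu(0, pending))
 *            process_pending_work();
 */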

#include <asm-generic/percpu.h>

/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)                  \
        DEFINE_PER_CPU(_type, _name) = _initvalue;                      \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =       \
                                { [0 ... NR_CPUS-1] = _initvalue };     \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name);                          \
        extern __typeof__(_type) *_name##_early_ptr;            \
        extern __typeof__(_type)  _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)                              \
        *(early_per_cpu_ptr(_name) ?                            \
                &early_per_cpu_ptr(_name)[_cpu] :               \
                &per_cpu(_name, _cpu))
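
/*
 * Usage sketch, modeled on x86_cpu_to_apicid (a real early per-cpu
 * variable): before the per-cpu areas exist, early_per_cpu() reads
 * the __initdata map; once they do (and the early pointer is cleared),
 * it reads the real per-cpu variable:
 *
 *    DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *    ...
 *    u16 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 */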

#else   /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
        DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif  /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */