s390/percpu: use generic percpu ops for CONFIG_32BIT
Author: Heiko Carstens <heiko.carstens@de.ibm.com>
Mon, 21 Oct 2013 05:44:08 +0000 (07:44 +0200)
Committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Thu, 24 Oct 2013 15:17:13 +0000 (17:17 +0200)
Remove the special cases for the this_cpu_* functions for 32 bit
in order to make it easier to add additional code for 64 bit.
32 bit will use the generic implementation.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/percpu.h

index 86fe0ee2cee5945beacca37f563d6d118fae0a12..41baca870d0ce6861cc39553dcc695f6cbc7d96d 100644 (file)
  */
 #define __my_cpu_offset S390_lowcore.percpu_offset
 
+#ifdef CONFIG_64BIT
+
 /*
  * For 64 bit module code, the module may be more than 4G above the
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
        do {                                                            \
                old__ = prev__;                                         \
                new__ = old__ op (val);                                 \
-               switch (sizeof(*ptr__)) {                               \
-               case 8:                                                 \
-                       prev__ = cmpxchg64(ptr__, old__, new__);        \
-                       break;                                          \
-               default:                                                \
-                       prev__ = cmpxchg(ptr__, old__, new__);          \
-               }                                                       \
+               prev__ = cmpxchg(ptr__, old__, new__);                  \
        } while (prev__ != old__);                                      \
        preempt_enable();                                               \
        new__;                                                          \
        pcp_op_T__ *ptr__;                                              \
        preempt_disable();                                              \
        ptr__ = __this_cpu_ptr(&(pcp));                                 \
-       switch (sizeof(*ptr__)) {                                       \
-       case 8:                                                         \
-               ret__ = cmpxchg64(ptr__, oval, nval);                   \
-               break;                                                  \
-       default:                                                        \
-               ret__ = cmpxchg(ptr__, oval, nval);                     \
-       }                                                               \
+       ret__ = cmpxchg(ptr__, oval, nval);                             \
        preempt_enable();                                               \
        ret__;                                                          \
 })
 #define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
 #define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
 
 #define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2)       \
 ({                                                                     \
 })
 
 #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
 
 #include <asm-generic/percpu.h>