ARM: 7861/1: cacheflush: consolidate single-CPU ARMv7 cache disabling code
author	Nicolas Pitre <nicolas.pitre@linaro.org>
Fri, 18 Oct 2013 21:06:03 +0000 (22:06 +0100)
committer	Russell King <rmk+kernel@arm.linux.org.uk>
Tue, 29 Oct 2013 11:06:23 +0000 (11:06 +0000)
This code is becoming duplicated in many places.  So let's consolidate
it into a handy macro that is known to be right and available for reuse.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/cacheflush.h
arch/arm/mach-vexpress/dcscb.c
arch/arm/mach-vexpress/tc2_pm.c
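
For orientation, here is a minimal usage sketch (not part of the patch; the function name and the last-man flag are illustrative) showing how a platform power-down path is expected to invoke the new macro, mirroring the call sites converted below:

	#include <linux/types.h>	/* bool */
	#include <asm/cacheflush.h>	/* v7_exit_coherency_flush() */

	/* Hypothetical power-down helper for one CPU in a cluster. */
	static void example_power_down(bool last_man)
	{
		if (last_man)
			/* Whole cluster going down: flush every cache level. */
			v7_exit_coherency_flush(all);
		else
			/* Only this CPU going down: flush to LoUIS (local L1). */
			v7_exit_coherency_flush(louis);

		/* This CPU is now non-coherent; no ldrex/strex beyond this point. */
	}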

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875636e1514d377fc61b426534b3f74..ee753f1749cd795b03557273afdb3d0832d6fc8e 100644
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
 
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and to do so without any intervening memory access between those
+ * steps, not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - ldrex/strex (and similar) instructions must not be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ *   however some exceptions may exist.  Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior to disabling the cache
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y.  ip is saved as well in case r12-clobbering
+ *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+       asm volatile( \
+       "stmfd  sp!, {fp, ip} \n\t" \
+       "mrc    p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
+       "bic    r0, r0, #"__stringify(CR_C)" \n\t" \
+       "mcr    p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t" \
+       "isb    \n\t" \
+       "bl     v7_flush_dcache_"__stringify(level)" \n\t" \
+       "clrex  \n\t" \
+       "mrc    p15, 0, r0, c1, c0, 1   @ get ACTLR \n\t" \
+       "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t" \
+       "mcr    p15, 0, r0, c1, c0, 1   @ set ACTLR \n\t" \
+       "isb    \n\t" \
+       "dsb    \n\t" \
+       "ldmfd  sp!, {fp, ip}" \
+       : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+             "r9","r10","lr","memory" )
+
 #endif
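
The level argument is spliced into the branch target with __stringify() (include/linux/stringify.h), so a single macro covers both flush depths.  Roughly:

	/* include/linux/stringify.h: two-level expansion so that macro
	 * arguments are themselves expanded before being stringized. */
	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)

	/*
	 * Hence "bl	v7_flush_dcache_" __stringify(level) yields:
	 *   v7_exit_coherency_flush(all)    ->  bl	v7_flush_dcache_all
	 *   v7_exit_coherency_flush(louis)  ->  bl	v7_flush_dcache_louis
	 */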
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c6c4356129733962ec286a8fae5d83f779..14d499688736b3c816c082f3216aa12503005b64 100644
@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&dcscb_lock);
 
-               /*
-                * Flush all cache levels for this cluster.
-                *
-                * To do so we do:
-                * - Clear the SCTLR.C bit to prevent further cache allocations
-                * - Flush the whole cache
-                * - Clear the ACTLR "SMP" bit to disable local coherency
-                *
-                * Let's do it in the safest possible way i.e. with
-                * no memory access within the following sequence
-                * including to the stack.
-                *
-                * Note: fp is preserved to the stack explicitly prior doing
-                * this since adding it to the clobber list is incompatible
-                * with having CONFIG_FRAME_POINTER=y.
-                */
-               asm volatile(
-               "str    fp, [sp, #-4]! \n\t"
-               "mrc    p15, 0, r0, c1, c0, 0   @ get CR \n\t"
-               "bic    r0, r0, #"__stringify(CR_C)" \n\t"
-               "mcr    p15, 0, r0, c1, c0, 0   @ set CR \n\t"
-               "isb    \n\t"
-               "bl     v7_flush_dcache_all \n\t"
-               "clrex  \n\t"
-               "mrc    p15, 0, r0, c1, c0, 1   @ get AUXCR \n\t"
-               "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t"
-               "mcr    p15, 0, r0, c1, c0, 1   @ set AUXCR \n\t"
-               "isb    \n\t"
-               "dsb    \n\t"
-               "ldr    fp, [sp], #4"
-               : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                     "r9","r10","lr","memory");
+               /* Flush all cache levels for this cluster. */
+               v7_exit_coherency_flush(all);
 
                /*
                 * This is a harmless no-op.  On platforms with a real
@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
        } else {
                arch_spin_unlock(&dcscb_lock);
 
-               /*
-                * Flush the local CPU cache.
-                * Let's do it in the safest possible way as above.
-                */
-               asm volatile(
-               "str    fp, [sp, #-4]! \n\t"
-               "mrc    p15, 0, r0, c1, c0, 0   @ get CR \n\t"
-               "bic    r0, r0, #"__stringify(CR_C)" \n\t"
-               "mcr    p15, 0, r0, c1, c0, 0   @ set CR \n\t"
-               "isb    \n\t"
-               "bl     v7_flush_dcache_louis \n\t"
-               "clrex  \n\t"
-               "mrc    p15, 0, r0, c1, c0, 1   @ get AUXCR \n\t"
-               "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t"
-               "mcr    p15, 0, r0, c1, c0, 1   @ set AUXCR \n\t"
-               "isb    \n\t"
-               "dsb    \n\t"
-               "ldr    fp, [sp], #4"
-               : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                     "r9","r10","lr","memory");
+               /* Disable and flush the local CPU cache. */
+               v7_exit_coherency_flush(louis);
        }
 
        __mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb4819291241f30b51d5e7b58c14d1d07c0d32..4eb92ebfd95322e50c1428f7310c8c19e65abd2b 100644
@@ -156,32 +156,7 @@ static void tc2_pm_down(u64 residency)
                        : : "r" (0x400) );
                }
 
-               /*
-                * We need to disable and flush the whole (L1 and L2) cache.
-                * Let's do it in the safest possible way i.e. with
-                * no memory access within the following sequence
-                * including the stack.
-                *
-                * Note: fp is preserved to the stack explicitly prior doing
-                * this since adding it to the clobber list is incompatible
-                * with having CONFIG_FRAME_POINTER=y.
-                */
-               asm volatile(
-               "str    fp, [sp, #-4]! \n\t"
-               "mrc    p15, 0, r0, c1, c0, 0   @ get CR \n\t"
-               "bic    r0, r0, #"__stringify(CR_C)" \n\t"
-               "mcr    p15, 0, r0, c1, c0, 0   @ set CR \n\t"
-               "isb    \n\t"
-               "bl     v7_flush_dcache_all \n\t"
-               "clrex  \n\t"
-               "mrc    p15, 0, r0, c1, c0, 1   @ get AUXCR \n\t"
-               "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t"
-               "mcr    p15, 0, r0, c1, c0, 1   @ set AUXCR \n\t"
-               "isb    \n\t"
-               "dsb    \n\t"
-               "ldr    fp, [sp], #4"
-               : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                     "r9","r10","lr","memory");
+               v7_exit_coherency_flush(all);
 
                cci_disable_port_by_cpu(mpidr);
 
@@ -197,26 +172,7 @@ static void tc2_pm_down(u64 residency)
 
                arch_spin_unlock(&tc2_pm_lock);
 
-               /*
-                * We need to disable and flush only the L1 cache.
-                * Let's do it in the safest possible way as above.
-                */
-               asm volatile(
-               "str    fp, [sp, #-4]! \n\t"
-               "mrc    p15, 0, r0, c1, c0, 0   @ get CR \n\t"
-               "bic    r0, r0, #"__stringify(CR_C)" \n\t"
-               "mcr    p15, 0, r0, c1, c0, 0   @ set CR \n\t"
-               "isb    \n\t"
-               "bl     v7_flush_dcache_louis \n\t"
-               "clrex  \n\t"
-               "mrc    p15, 0, r0, c1, c0, 1   @ get AUXCR \n\t"
-               "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t"
-               "mcr    p15, 0, r0, c1, c0, 1   @ set AUXCR \n\t"
-               "isb    \n\t"
-               "dsb    \n\t"
-               "ldr    fp, [sp], #4"
-               : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                     "r9","r10","lr","memory");
+               v7_exit_coherency_flush(louis);
        }
 
        __mcpm_cpu_down(cpu, cluster);