#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_DCACHE_DISABLE)
/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
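LDREX/STREX need a working exclusive monitor, and on a number of ARMv6/v7 cores exclusive accesses to non-cacheable memory are unreliable or unsupported; that appears to be the motivation for dropping back to the pre-ARMv6 code paths whenever CONFIG_CPU_DCACHE_DISABLE is set. As a rough, illustrative sketch (hypothetical helper names, not the exact kernel source), the two shapes being selected between look like this:

/* Sketch: ARMv6+ exclusive-monitor path, retried until strex succeeds. */
static inline void sketch_atomic_add_exclusive(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

/* Sketch: pre-ARMv6 / D-cache-disabled path; UP atomicity comes from
 * masking interrupts around the read-modify-write. */
static inline void sketch_atomic_add_irqsave(int i, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	v->counter += i;
	raw_local_irq_restore(flags);
}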
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_DCACHE_DISABLE)
#define __down_op(ptr,fail) \
({ \
#ifdef swp_is_buggy
unsigned long flags;
#endif
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_DCACHE_DISABLE)
unsigned int tmp;
#endif
smp_mb();
switch (size) {
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_DCACHE_DISABLE)
case 1:
asm volatile("@ __xchg1\n"
"1: ldrexb %0, [%3]\n"
#include <asm-generic/cmpxchg-local.h>
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 || defined(CONFIG_CPU_DCACHE_DISABLE)
/* min ARCH < ARMv6 */
#ifdef CONFIG_SMP
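The __xchg hunk further up cuts off inside the exclusive byte exchange. For orientation, the two byte-sized variants that the #if selects between have roughly this shape (a from-memory sketch, not copied from the patch; ret, tmp, x and ptr come from the enclosing __xchg function): the exclusive pair retries until STREXB reports success, while the legacy path uses a single SWPB.

	case 1:
		/* ARMv6+ (exclusives usable): retry until strexb succeeds. */
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;

	case 1:
		/* Pre-ARMv6 / D-cache disabled: atomic byte swap via SWPB. */
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;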
ldmfd sp!, {r7, pc}
1: .word __ARM_NR_cmpxchg
-#elif __LINUX_ARM_ARCH__ < 6
+#elif __LINUX_ARM_ARCH__ < 6 || defined(CONFIG_CPU_DCACHE_DISABLE)
#ifdef CONFIG_MMU
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_DCACHE_DISABLE)
.macro bitop, instr
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
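In the bitop macro above, the strneb to a low address is a deliberate fault used to assert that the pointer is word-aligned. With exclusives ruled out, the non-ARMv6 flavour of these macros instead masks interrupts around the read-modify-write; at the C level the idea is roughly the following (hypothetical helper, word-based for brevity, whereas the actual assembly fallback works on bytes):

/* Hypothetical C-level equivalent of the non-exclusive bitop fallback:
 * mask interrupts so the update cannot be interrupted on a UP system. */
static void sketch_set_bit_irqsave(int nr, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (nr & 31);

	p += nr >> 5;			/* advance to the word holding bit nr */
	raw_local_irq_save(flags);
	*p |= mask;
	raw_local_irq_restore(flags);
}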
config CPU_DCACHE_DISABLE
bool "Disable D-Cache (C-bit)"
depends on CPU_CP15
+ depends on !SMP && !SWP_EMULATE
+ select GENERIC_ATOMIC64
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
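The added dependencies follow from the same limitation: with the D-cache off the kernel avoids LDREX/STREX, so it cannot support SMP (the IRQ-masking fallbacks are only safe on a uniprocessor) and cannot use SWP_EMULATE, which itself emulates SWP with exclusives. GENERIC_ATOMIC64 is selected because the LDREXD/STREXD-based 64-bit atomics are likewise unavailable; the generic implementation in lib/atomic64.c protects each atomic64_t with a spinlock instead. Roughly (simplified sketch with a single lock and a hypothetical name, not the exact library source, which hashes the variable's address onto a small array of locks):

/* Simplified sketch of a lock-based 64-bit atomic op, in the spirit of
 * the GENERIC_ATOMIC64 implementation in lib/atomic64.c. */
static DEFINE_RAW_SPINLOCK(sketch_atomic64_lock);

long long sketch_atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	long long val;

	raw_spin_lock_irqsave(&sketch_atomic64_lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(&sketch_atomic64_lock, flags);
	return val;
}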