From: Ivan Kokshaysky
Date: Fri, 21 Oct 2005 18:06:15 +0000 (+0400)
Subject: [PATCH] alpha: additional smp barriers
X-Git-Tag: firefly_0821_release~39870^2~6^2~45
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=d475f3f47a0427dfee483cecf9a7e9109e991423;p=firefly-linux-kernel-4.4.55.git

[PATCH] alpha: additional smp barriers

As stated in Documentation/atomic_ops.txt, atomic functions returning
values must have the memory barriers both before and after the operation.

Thanks to DaveM for pointing that out.

Signed-off-by: Ivan Kokshaysky
Signed-off-by: Linus Torvalds
---

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb68c..0b40bad00289 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -100,18 +100,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
@@ -120,54 +121,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
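
Editorial note: the ordering rule the patch enforces can be illustrated with a
minimal user-space sketch, not part of the commit. It assumes GCC/Clang
__atomic builtins standing in for the Alpha ll/sc sequences and smp_mb(), and
the my_atomic_* names are hypothetical. The point is the placement: a
value-returning atomic must behave as a full memory barrier, so a fence goes
both before and after the relaxed read-modify-write, while a non-returning
atomic needs no such fences.

/*
 * Hypothetical illustration only; not the kernel implementation.
 * Restates the Documentation/atomic_ops.txt rule with __atomic builtins.
 */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* Returns nothing: atomic_ops.txt imposes no ordering requirement here. */
static inline void my_atomic_add(int i, my_atomic_t *v)
{
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

/* Returns the new value: must act as a full barrier on both sides,
 * mirroring the smp_mb() calls the patch adds around the ll/sc loop. */
static inline int my_atomic_add_return(int i, my_atomic_t *v)
{
	int result;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* barrier before */
	result = __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED) + i;
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* barrier after */
	return result;
}

int main(void)
{
	my_atomic_t v = { 0 };

	my_atomic_add(2, &v);
	printf("add_return -> %d\n", my_atomic_add_return(3, &v));	/* prints 5 */
	return 0;
}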