#ifndef ____MLP_LOCK_H__
#define ____MLP_LOCK_H__
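
/* Note: INTPTR is assumed to be supplied by the surrounding runtime as a
   macro naming a basic integer type wide enough to hold a pointer (e.g.
   long on 64-bit builds).  It must be a macro rather than a typedef, since
   the CAS signatures below spell "unsigned INTPTR". */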

#define __xg(x) ((volatile INTPTR *)(x))

#define CFENCE asm volatile("":::"memory");
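/* CFENCE above is a compiler-only fence: the empty asm with a "memory"
   clobber keeps GCC from reordering or caching memory accesses across it,
   but it emits no instruction, so the CPU itself may still reorder. */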
16 ".section .smp_locks,\"a\"\n" \
18 " .long 661f\n" /* address */\

static inline void atomic_dec(volatile int *v) {
  __asm__ __volatile__ (LOCK_PREFIX "decl %0"
                        : "+m" (*v));
}

static inline void atomic_inc(volatile int *v) {
  __asm__ __volatile__ (LOCK_PREFIX "incl %0"
                        : "+m" (*v));
}

// this returns TRUE if the atomic subtraction leaves a zero value;
// folding the test into the locked op means two threads can never both
// decrement, then both read zero and each think it was the last decrementer
static inline int atomic_sub_and_test(int i, volatile int *v) {
  unsigned char c;

  __asm__ __volatile__ (LOCK_PREFIX "subl %2,%0; sete %1"
                        : "+m" (*v), "=qm" (c)
                        : "ir" (i) : "memory");
  return c;
}

static inline int LOCKXCHG32(volatile int* ptr, int val){
  int retval;
  //note: xchgl always implies lock
  __asm__ __volatile__("xchgl %0,%1"
                       : "=r"(retval)
                       : "m"(*ptr), "0"(val)
                       : "memory");
  return retval;
}

#ifdef BIT64  /* 64-bit-target flag; macro name assumed from the runtime */
static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val){
  INTPTR retval;
  //note: xchgq, like xchgl, always implies lock
  __asm__ __volatile__("xchgq %0,%1"
                       : "=r"(retval)
                       : "m"(*ptr), "0"(val)
                       : "memory");
  return retval;
}
#else
#define LOCKXCHG LOCKXCHG32
#endif
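
/* Illustrative sketch, not part of the original header: LOCKXCHG as an
   atomic "swap in, take out" on a one-element mailbox.  The name
   mailbox_swap is hypothetical. */
static inline INTPTR mailbox_swap(volatile INTPTR *slot, INTPTR item) {
  // store item and return whatever previously occupied the slot, atomically
  return LOCKXCHG(slot, item);
}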

static inline int write_trylock(volatile int *lock) {
  int retval=0;
  __asm__ __volatile__("xchgl %0,%1"
                       : "=r"(retval)
                       : "m"(*lock), "0"(retval)
                       : "memory");
  return retval;
}

#ifdef BIT64
static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new){
  unsigned INTPTR prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a"(prev)
                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
                       : "memory");
  return prev;
}
#else
static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new){
  unsigned long prev;
  __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                       : "=a"(prev)
                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
                       : "memory");
  return prev;
}
#endif

static inline int BARRIER(){
  CFENCE;
  return 1;
}

#endif // ____MLP_LOCK_H__