/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/war.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */
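
/*
 * Worked example of the encoding used below: the lock is one 32-bit
 * word with the tail ("ticket") in the high half and the head
 * ("serving_now") in the low half, and a ticket is taken by adding
 * inc == 0x10000 to the word:
 *
 *	0x00050005: head == tail, lock free
 *	0x00060005: held by the owner of ticket 5, no waiters
 *	0x00080005: held, with two more CPUs queued behind the holder
 *
 * arch_spin_is_locked() below is simply "head != tail" computed on a
 * single snapshot of that word.
 */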
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
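
/*
 * (tail - head) is the queue occupancy: 1 means the lock is held with
 * nobody waiting, anything larger means CPUs are queued behind the
 * holder, hence the "> 1" test below.
 */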
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	/*
	 * The out-of-line 4:/6: slow path below spins roughly 32
	 * iterations per ticket of queue distance, then re-reads
	 * serving_now and recomputes the distance.
	 */
	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}
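
/*
 * Unlock only needs to advance serving_now: the wmb() orders the
 * critical section before the releasing store, and a ticket grab
 * racing with the 16-bit store simply retries its ll/sc.
 */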
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
}
/* Returns nonzero if the lock was acquired, zero if it was busy. */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
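
/*
 * Illustrative sketch of the mixed-irq pattern described above, using
 * the generic rwlock API (the lock name is made up):
 *
 *	read_lock(&stats_lock);				<- any context
 *	...
 *	read_unlock(&stats_lock);
 *
 *	write_lock_irqsave(&stats_lock, flags);		<- writers only
 *	...
 *	write_unlock_irqrestore(&stats_lock, flags);
 */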
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)
/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
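
/*
 * The rwlock word is a signed reader count: arch_write_lock() parks
 * 0x80000000 in it (the "lui 0x8000" below), so a negative value means
 * "writer active". That sign bit is what arch_read_can_lock()'s ">= 0"
 * test and the readers' bltz spins rely on.
 */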
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}
/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	sub	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb__before_llsc();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */