/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */
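
/*
 * Here both counters share one 32-bit word: the "now serving" head lives
 * in bits [12:0] and the ticket tail in bits [26:14].  Adding 0x4000
 * therefore takes one ticket, 0x1fff masks either 13-bit field, and the
 * ori/xori 0x2000 pair in unlock clears any carry out of the serving
 * count so it wraps modulo 2^13.
 */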

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 14) ^ counters) & 0x1fff;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	while (__raw_spin_is_locked(x)) { cpu_relax(); }

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 14) - counters) & 0x1fff) > 1;
}
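
/*
 * The lock slow path below avoids hammering the lock word: a waiter
 * computes its distance from the head, idles in a delay loop of
 * (distance << 5) iterations, then rereads the serving count with a
 * plain lw before checking again.
 */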

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int my_ticket;
	int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"5:	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	5b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 3f			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"							\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"5:	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	5b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	}

	smp_llsc_mb();
}
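
/*
 * Unlock issues smp_llsc_mb() before touching the lock word, so the
 * critical section is ordered ahead of the ll/sc that advances the
 * serving count.
 */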

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"				# __raw_spin_unlock	\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[ticket], %[ticket], 1			\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_unlock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[ticket], %[ticket], 1			\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 2f				\n"
		"	 nop						\n"
		"							\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	}
}
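
/*
 * Trylock returns nonzero on success and only attempts the sc when the
 * ticket it would take is the one currently being served, i.e. when the
 * lock would be granted immediately.
 */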

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 4f				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"4:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)
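
/*
 * The rwlock word is a signed count: every reader adds 1 and a writer
 * claims the whole word by setting bit 31 ("lui 0x8000"), making it
 * negative.  That is why read_can_lock tests >= 0 and the read paths
 * spin on bltz.
 */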

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
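
/*
 * A writer has to wait for the count to drop to zero - no readers and
 * no writer - before claiming the word with the sign bit.
 */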

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}
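
/*
 * Releasing the write lock takes a single store: the writer owns the
 * word exclusively, and the smp_mb() orders the critical section before
 * the "sw $0".
 */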

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
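
/*
 * Both trylock variants place a __WEAK_LLSC_MB on their success path so
 * that, on weakly ordered cores, accesses from the critical section
 * cannot be observed before the lock is visibly taken.
 */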

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */