/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>			/* R10000_LLSC_WAR */

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#endif
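
/*
 * The asm bodies below also use word-sized LL/SC and INS/EXT mnemonics.
 * A sketch of the expected definitions (assumed: the 64-bit kernel uses
 * the doubleword forms):
 */
#if _MIPS_SZLONG == 32
#define __LL	"ll	"
#define __SC	"sc	"
#define __INS	"ins	"
#define __EXT	"ext	"
#elif _MIPS_SZLONG == 64
#define __LL	"lld	"
#define __SC	"scd	"
#define __INS	"dins	"
#define __EXT	"dext	"
#endif
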
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
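
/*
 * Every operation below first splits the bit number into a word index and
 * a position within that word, as the function bodies show:
 *
 *	m   = (unsigned long *)addr + (nr >> SZLONG_LOG);
 *	bit = nr & SZLONG_MASK;
 *
 * which is why @nr may name a bit well beyond the first word at @addr.
 */
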
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
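
/*
 * Usage sketch (the bitmap and index are illustrative, not from this file):
 * atomically mark entry 100 of a driver-private bitmap as in use.  No memory
 * barrier is implied, unlike the test_and_*() variants below.
 *
 *	static DECLARE_BITMAP(in_use, 256);
 *	...
 *	set_bit(100, in_use);
 */
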
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3		\n"
			"	" __SC	"%2, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
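
/*
 * Illustrative semantics (variable names are hypothetical): with bit 3 of
 * "word" initially clear,
 *
 *	old = test_and_set_bit(3, &word);	- returns 0, bit 3 is now set
 *	old = test_and_set_bit(3, &word);	- returns nonzero, word unchanged
 *
 * and both calls are fully ordered with respect to surrounding accesses.
 */
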
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3		\n"
			"	" __SC	"%2, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res;
}
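
/*
 * Typical lock-style pairing (sketch; the bit name and state word are
 * hypothetical):
 *
 *	if (test_and_set_bit_lock(MY_BUSY_BIT, &state))
 *		return -EBUSY;			- already owned elsewhere
 *	... critical section ...
 *	clear_bit_unlock(MY_BUSY_BIT, &state);
 */
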
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1		\n"
			"	" __INS "%0, $0, %3, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3		\n"
			"	xor	%2, %3			\n"
			"	" __SC	"%2, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC	"%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3		\n"
			"	" __SC	"\t%2, %1		\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push				\n"
		"	.set	mips32				\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push				\n"
		"	.set	mips64				\n"
		"	dclz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
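
/*
 * Worked example of the fallback path (32-bit kernel, word == 0x00f0):
 * the top 16 bits are clear, so num drops from 31 to 15 and word is
 * shifted up by 16; the next test drops 8 more (num == 7), and the
 * remaining tests each see a set bit, so __fls(0x00f0) == 7.
 */
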
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
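
/*
 * Example: __ffs(0x18) isolates the lowest set bit (0x18 & -0x18 == 0x08)
 * and then asks __fls() for its position, giving 3.
 */
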
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push				\n"
		"	.set	mips32				\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}

	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
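
/*
 * Example: ffs(0x18) == 4 (1-based, via fls(0x08)), whereas
 * __ffs(0x18) == 3 (0-based); ffs(0) is defined as 0.
 */
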
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */