#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
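
/*
 * Usage sketch (illustrative, with a hypothetical bitmap "map"): a bitmap
 * is an array of unsigned long; BIT_WORD() picks the word that holds bit
 * @nr and BIT_MASK() the bit within that word.  Setting bit 70 of a
 * 128-bit map by hand:
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *	map[BIT_WORD(70)] |= BIT_MASK(70);
 *
 * On a 64-bit build this sets bit 6 of map[1]; kernel code would normally
 * use set_bit()/test_bit() rather than open-coding the access.
 */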

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example,
 * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#define GENMASK(h, l)		(((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l)	(((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
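
/*
 * Worked example (illustrative, with a hypothetical register value "reg"):
 * GENMASK(15, 8) expands to 0x0000ff00, so a field in bits 15..8 can be
 * extracted with
 *
 *	u32 field = (reg & GENMASK(15, 8)) >> 8;
 *
 * Note that @h must be >= @l, and GENMASK(31, 0) would shift a 32-bit
 * constant by 32, which is undefined behaviour, so a full-width mask
 * needs GENMASK_ULL() or a plain ~0 instead.
 */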

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope.
 */
#include <asm/bitops.h>

/*
 * Provide __deprecated wrappers for the new interface to avoid flag-day
 * changes.  We need the ugly external functions to break header recursion
 * hell.
 */
#ifndef smp_mb__before_clear_bit
static inline void __deprecated smp_mb__before_clear_bit(void)
{
	extern void __smp_mb__before_atomic(void);
	__smp_mb__before_atomic();
}
#endif

#ifndef smp_mb__after_clear_bit
static inline void __deprecated smp_mb__after_clear_bit(void)
{
	extern void __smp_mb__after_atomic(void);
	__smp_mb__after_atomic();
}
#endif

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but starts the search at the current value of @bit */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but starts the search at the current value of @bit */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
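
/*
 * Usage sketch (illustrative): walk all set bits of a bitmap without
 * scanning the words by hand; @bit must be an integer lvalue:
 *
 *	unsigned long map[BITS_TO_LONGS(64)] = { 0x11UL };
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, map, 64)
 *		pr_info("bit %u is set\n", bit);
 *
 * This prints bits 0 and 4.  The _from variants resume the search at the
 * current value of @bit, which is handy for restarting an interrupted walk.
 */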

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
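
/*
 * Illustrative values: get_count_order() is ceil(log2(count)), so
 * get_count_order(16) == 4 and get_count_order(17) == 5, while
 * get_bitmask_order(16) == 5 (fls() is 1-based).  Neither function is
 * meaningful for count == 0: fls(0) == 0, so get_count_order(0) yields -1.
 */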

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
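
/*
 * Example (illustrative): hweight is the Hamming weight, i.e. the number
 * of set bits, so hweight_long(0xf0f0UL) == 8.  The sizeof() comparison
 * is a compile-time constant, so the dead branch is optimized away.
 */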

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}
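
/*
 * Illustrative values: rol32(0x80000001, 1) == 0x00000003 and
 * ror32(0x00000003, 1) == 0x80000001.  Note that as written a @shift of 0
 * makes the opposite shift equal to the full word width, which is
 * undefined behaviour in C, so callers should keep 1 <= @shift < width.
 */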

/**
 * sign_extend32 - sign extend a 32-bit value using the specified bit as the sign bit
 * @value: value to sign extend
 * @index: 0-based bit index (0 <= index < 32) of the sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
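
/*
 * Worked example (illustrative): a 12-bit two's-complement field holding
 * 0x800 means -2048, and sign_extend32(0x800, 11) == -2048.  The value is
 * shifted left by 31 - 11 = 20 so that bit 11 lands in bit 31, then the
 * arithmetic right shift copies the sign bit back down (the kernel
 * assumes signed >> is arithmetic).
 */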

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64-bit word
 * @word: The 64-bit word
 *
 * On 64-bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
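
/*
 * Illustrative values: __ffs64(0x10) == 4 and
 * __ffs64(0x0000000100000000ULL) == 32.  On a 32-bit kernel the second
 * case takes the high-word path above; on 64-bit it is a plain __ffs().
 * Like __ffs(), the result is 0-based.
 */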

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits)	\
({								\
	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
	typeof(*ptr) old, new;					\
								\
	do {							\
		old = ACCESS_ONCE(*ptr);			\
		new = (old & ~mask) | bits;			\
	} while (cmpxchg(ptr, old, new) != old);		\
								\
	new;							\
})
#endif
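
/*
 * Usage sketch (illustrative, with a hypothetical "flags" word): replace
 * one field of a flags word atomically, leaving all other bits intact.
 * The cmpxchg() loop retries until no other CPU has modified *ptr between
 * the load and the store:
 *
 *	unsigned long flags = 0;
 *
 *	set_mask_bits(&flags, GENMASK(3, 0), 0x5);
 *
 * This clears bits 3..0 and then sets them to 0101b in one atomic update;
 * the macro evaluates to the new value.
 */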

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif
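
/*
 * Illustrative values: with bits 3 and 9 set in a 16-bit map,
 * find_last_bit(map, 16) == 9; on an all-zero bitmap the @size argument
 * (16 here) is returned.
 */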

#endif /* __KERNEL__ */
#endif