/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
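
/*
 * Example (illustrative, assuming BITS_PER_LONG == 64):
 *
 *	aligned_byte_mask(2) == 0x000000000000ffff	(little-endian)
 *	aligned_byte_mask(2) == 0xffff000000000000	(big-endian)
 *
 * i.e. in both cases the mask covers the first two bytes of the long as it
 * is laid out in memory.
 */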

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
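
/*
 * Example (illustrative, assuming a 64-bit long):
 *
 *	BITS_TO_LONGS(65) == 2
 *	BITS_TO_BYTES(65) == 9
 */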

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * The generic non-atomic bitops are included here because they may be needed
 * by architecture-specific static inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm, which
 * prevents the compiler from optimizing them into compile-time expressions
 * or constants. By contrast, the generic_*() helpers are written in pure C,
 * which the compiler optimizes well.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
 * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
 * the arguments can be resolved at compile time. The selecting expression is
 * itself a constant, so the remaining cases are functionally unchanged.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const##op(nr, addr) : op(nr, addr))

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
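
/*
 * Example (illustrative): when both the bit number and the target are
 * compile-time constants,
 *
 *	unsigned long map = 0;
 *	__set_bit(5, &map);
 *
 * can be folded by the compiler into the equivalent of
 * `unsigned long map = BIT(5);`, as described above.
 */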

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope.
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(const_##name, generic_##name) &&	\
		      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
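
/*
 * Example (illustrative): hweight_long(0xf0f0UL) == 8, since eight bits are
 * set regardless of whether long is 32 or 64 bits wide.
 */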

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
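
/*
 * Example (illustrative):
 *
 *	rol64(0x8000000000000001ULL, 4) == 0x0000000000000018ULL
 *	ror64(0x0000000000000018ULL, 4) == 0x8000000000000001ULL
 */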

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
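
/*
 * Example (illustrative): ror32(0x1, 1) == 0x80000000 -- the low bit wraps
 * around to the most significant position.
 */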

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
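
/*
 * Example (illustrative): sign_extend32(0x80, 7) == -128, i.e. an 8-bit
 * two's complement value is widened to a signed 32-bit value.
 */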

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}
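
/*
 * Example (illustrative): sign_extend64(0x800, 11) == -2048, treating bit 11
 * as the sign bit of a 12-bit two's complement value.
 */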

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
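
/*
 * Example (illustrative): fls_long(0) == 0 and fls_long(0x8000UL) == 16,
 * i.e. the 1-based position of the most significant set bit.
 */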

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}
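
/*
 * Example (illustrative): get_count_order(4) == 2 and get_count_order(5) == 3,
 * i.e. the order of the power of two that count is rounded up to.
 */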

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order(), but takes an unsigned long parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64-bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
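
/*
 * Example (illustrative): __ffs64(0x10) == 4 and
 * __ffs64(0x8000000000000000ULL) == 63 (0-based index of the lowest set bit).
 */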

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}
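
/*
 * Example (illustrative):
 *
 *	assign_bit(3, &flags, enabled);
 *
 * behaves like `enabled ? set_bit(3, &flags) : clear_bit(3, &flags)`, where
 * `flags` and `enabled` are hypothetical caller-side variables.
 */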

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)                         \
	({                                              \
		typecheck_pointer(*(addr));             \
		__set_bit(nr, (unsigned long *)(addr)); \
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)                         \
	({                                                \
		typecheck_pointer(*(addr));               \
		__clear_bit(nr, (unsigned long *)(addr)); \
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)                       \
	({                                             \
		typecheck_pointer(*(addr));            \
		test_bit(nr, (unsigned long *)(addr)); \
	})
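
/*
 * Illustrative combined use (assumes the pointee's alignment leaves the low
 * bit free for tagging; foo() is a hypothetical allocator as in the examples
 * above):
 *
 *	void *p = foo();
 *	__ptr_set_bit(0, &p);
 *	if (__ptr_test_bit(0, &p))
 *		__ptr_clear_bit(0, &p);
 */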

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	old__;							\
})
#endif
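
/*
 * Example (illustrative): set_mask_bits(&word, 0xff, 0x12) atomically
 * replaces the low byte of the hypothetical variable `word` with 0x12 and
 * evaluates to the old value.
 */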

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif
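
/*
 * Example (illustrative): bit_clear_unless(&word, BIT(0), BIT(1)) clears
 * bit 0 of the hypothetical variable `word` only if bit 1 is not set, and
 * evaluates to true when the clear was performed.
 */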

#endif /* __KERNEL__ */
#endif