xref: /linux-6.15/include/linux/bitops.h (revision 9f2c2d6b)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
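
/*
 * Illustrative values, assuming BITS_PER_LONG == 64:
 *
 *	BITS_PER_TYPE(long)	== 64
 *	BITS_TO_LONGS(65)	== 2	(round-up division: 65 / 64)
 *	BITS_TO_BYTES(9)	== 2
 */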

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because those may be needed by architecture-specific static
 * inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm code and due
 * to that the compiler can't optimize them to compile-time expressions or
 * constants. In contrast, generic_*() helpers are defined in pure C and
 * compilers optimize them just fine.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
 * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
 * the arguments can be resolved at compile time. That expression itself is a
 * constant and doesn't bring any functional changes to the remaining cases.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const##op(nr, addr) : op(nr, addr))

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr)	bitop(_test_bit_acquire, nr, addr)
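
/*
 * Sketch of the intent described above (illustrative, arbitrary bit number):
 * because both arguments below are resolvable at compile time, bitop() picks
 * the const_*() variant and the whole expression can fold, so
 *
 *	unsigned long foo = 0;
 *	__set_bit(5, &foo);
 *
 * can end up equivalent to `unsigned long foo = BIT(5);`.
 */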

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(const_##name, generic_##name) &&	\
		      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
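
/*
 * e.g. hweight_long(0xf0UL) == 4; on a 64-bit kernel the 64-bit variant is
 * used, so bits above bit 31 are counted as well.
 */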

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
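
/*
 * Illustrative values (these follow directly from the definitions above):
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x80000001, 1) == 0xc0000000
 */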

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
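
/*
 * e.g. sign_extend32(0x80, 7) == -128 and sign_extend32(0x7f, 7) == 127:
 * bit @index is treated as the sign bit and replicated into all higher bits.
 */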

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned int fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * Same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}
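
/*
 * e.g. get_count_order(16) == 4 and get_count_order(17) == 5: the result is
 * the order of the smallest power of two >= the argument (and -1 for 0).
 */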

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
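
/*
 * e.g. __ffs64(0x0000000100000000ULL) == 32; on 32-bit kernels this takes the
 * upper-half path above, on 64-bit kernels it is a plain __ffs().
 */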

/**
 * fns - find N'th set bit in a word
 * @word: The word to search
 * @n: Bit to find
 */
static inline unsigned long fns(unsigned long word, unsigned int n)
{
	while (word && n--)
		word &= word - 1;

	return word ? __ffs(word) : BITS_PER_LONG;
}
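
/*
 * e.g. fns(0b101000, 0) == 3 and fns(0b101000, 1) == 5; if fewer than @n + 1
 * bits are set, BITS_PER_LONG is returned.
 */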

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}
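
/*
 * e.g. assign_bit(3, &flags, err == 0) sets or clears bit 3 of a hypothetical
 * @flags word depending on the condition, using the atomic set_bit()/
 * clear_bit(); __assign_bit() is the non-atomic counterpart.
 */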

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)                         \
	({                                              \
		typecheck_pointer(*(addr));             \
		__set_bit(nr, (unsigned long *)(addr)); \
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)                         \
	({                                                \
		typecheck_pointer(*(addr));               \
		__clear_bit(nr, (unsigned long *)(addr)); \
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *	        ...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)                       \
	({                                             \
		typecheck_pointer(*(addr));            \
		test_bit(nr, (unsigned long *)(addr)); \
	})

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	old__ = READ_ONCE(*(ptr));				\
	do {							\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (!try_cmpxchg(ptr, &old__, new__));		\
								\
	old__;							\
})
#endif
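
/*
 * Usage sketch for set_mask_bits() (hypothetical variable, field at bits
 * 7..4): atomically replace the field and fetch the previous word value:
 *
 *	old = set_mask_bits(&state, GENMASK(7, 4), 5 << 4);
 */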

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	old__ = READ_ONCE(*(ptr));				\
	do {							\
		if (old__ & test__)				\
			break;					\
		new__ = old__ & ~clear__;			\
	} while (!try_cmpxchg(ptr, &old__, new__));		\
								\
	!(old__ & test__);					\
})
#endif
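
/*
 * bit_clear_unless() clears the @clear bits only when none of the @test bits
 * are set, and evaluates to true in that case. Hypothetical usage:
 *
 *	if (bit_clear_unless(&flags, BIT(0), BIT(1)))
 *		...	(BIT(1) was not set, so BIT(0) has been cleared)
 */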

#endif /* __KERNEL__ */
#endif