#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef	__KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
/* DIV_ROUND_UP comes from <linux/kernel.h> */
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)
#define BITS_PER_BYTE		8
#endif
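
/*
 * Usage sketch (illustrative addition, not part of the original header).
 * BIT() builds a mask within a single long, while BIT_WORD()/BIT_MASK()
 * locate a bit inside a multi-word bitmap sized with BITS_TO_LONGS();
 * the name `map' below is hypothetical:
 *
 *	unsigned long map[BITS_TO_LONGS(100)];	(two longs on 64-bit)
 *
 *	map[BIT_WORD(66)] |= BIT_MASK(66);	(sets bit 66: word 1, bit 2)
 *	if (map[BIT_WORD(66)] & BIT_MASK(66))	(tests bit 66)
 *		...;
 */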

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
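
/*
 * Usage sketch (illustrative addition): for_each_bit() walks every set
 * bit below `size' via find_first_bit()/find_next_bit() from
 * <asm/bitops.h>; `map' and `i' are hypothetical names:
 *
 *	unsigned long map[BITS_TO_LONGS(128)];
 *	int i;
 *
 *	for_each_bit(i, map, 128)
 *		printk("bit %d is set\n", i);
 */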

/**
 * get_bitmask_order - number of bits needed to represent @count
 * @count: value to examine
 *
 * Returns fls(@count): one more than the bit index of @count's highest
 * set bit, or 0 when @count is 0.
 */
static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}
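
/*
 * Sketch (illustrative): get_bitmask_order(5) == 3, and a 3-bit mask
 * (0x7) covers the values 0..5.  The "-1" remark refers to exact powers
 * of two: get_bitmask_order(8) == 4, although a mask for 0..7 needs only
 * fls(8 - 1) == 3 bits.
 */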

/**
 * get_count_order - log2 of @count, rounded up
 * @count: value to examine
 *
 * Returns the smallest order such that (1 << order) >= @count,
 * i.e. ceil(log2(@count)) for @count > 0, and -1 for @count == 0.
 */
static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
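
/*
 * Sketch (illustrative): get_count_order(4) == 2 (already a power of
 * two), while get_count_order(5) == 3 because 5 rounds up to 8; handy
 * when sizing power-of-two tables.
 */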

/*
 * hweight_long - count the set bits (Hamming weight) in @w.  The
 * sizeof() test is a compile-time constant, so only one branch
 * survives.
 */
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
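
/*
 * Sketch (illustrative): hweight_long(0xf0UL) == 4 on both 32- and
 * 64-bit kernels; the population count of a value that fits in 32 bits
 * does not depend on the word size.
 */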

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}
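
/*
 * Sketch (illustrative): the rotates are inverses of each other for the
 * same shift:
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x00000003, 1) == 0x80000001
 *
 * Note that as written they assume 0 < shift < 32: a shift of 0 makes
 * (32 - shift) a full-width shift, which C leaves undefined.
 */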

/*
 * fls_long - fls() for an unsigned long, dispatching to fls() or
 * fls64() based on the compile-time word size.
 */
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
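
/*
 * Sketch (illustrative): fls() positions are 1-based, so fls_long(0) == 0,
 * fls_long(1) == 1, and on a 64-bit kernel fls_long(1UL << 40) == 41.
 */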

#endif /* _LINUX_BITOPS_H */