#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/*
 * Clearly slow versions of the hweightN() functions, their benefit is
 * of course compile time evaluation of constant arguments.
 */
#define HWEIGHT8(w)					\
      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
	(!!((w) & (1ULL << 0))) +			\
	(!!((w) & (1ULL << 1))) +			\
	(!!((w) & (1ULL << 2))) +			\
	(!!((w) & (1ULL << 3))) +			\
	(!!((w) & (1ULL << 4))) +			\
	(!!((w) & (1ULL << 5))) +			\
	(!!((w) & (1ULL << 6))) +			\
	(!!((w) & (1ULL << 7)))	)

#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
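/*
 * Example (illustrative sketch only, not part of the kernel API): set a
 * couple of bits in a word with BIT() and count them with hweight_long().
 * The helper name below is hypothetical; for constant masks the
 * HWEIGHT*() macros above would give the same answer at compile time.
 */
static inline unsigned int example_hweight_usage(void)
{
	unsigned long word = 0;

	word |= BIT(3);		/* set bit 3 */
	word |= BIT(5);		/* set bit 5 */

	/* hweight_long() dispatches to hweight32()/hweight64() by word size */
	return hweight_long(word);	/* 2 */
}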
/*
 * Type invariant version that simply casts things to the
 * largest type.
 */
#define HWEIGHT(w)   HWEIGHT64((u64)(w))

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
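/*
 * Example (illustrative sketch only, not part of the kernel API): find
 * the lowest set bit in a 64-bit mask, returning 64 for an empty mask,
 * since __ffs64() itself is undefined when no bits are set.  The helper
 * name is hypothetical.
 */
static inline unsigned long example_ffs64_or_64(u64 mask)
{
	if (!mask)
		return 64;
	return __ffs64(mask);
}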
#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit.
 */
extern unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size);

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first cleared bit.
 */
extern unsigned long find_first_zero_bit(const unsigned long *addr,
					 unsigned long size);
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

#ifdef CONFIG_GENERIC_FIND_LAST_BIT
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif /* CONFIG_GENERIC_FIND_LAST_BIT */

#ifdef CONFIG_GENERIC_FIND_NEXT_BIT

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 */
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 */
extern unsigned long find_next_zero_bit(const unsigned long *addr,
					unsigned long size,
					unsigned long offset);

#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
#endif /* __KERNEL__ */
#endif
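/*
 * Example (illustrative sketch only, not part of this header): claim the
 * first free slot in a 128-bit allocation map using the declarations
 * above.  "map" and its size are hypothetical caller-side names:
 *
 *	unsigned long map[BITS_TO_LONGS(128)];
 *	unsigned long bit;
 *
 *	bit = find_first_zero_bit(map, 128);
 *	if (bit < 128)
 *		map[BIT_WORD(bit)] |= BIT_MASK(bit);
 */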