/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define GENMASK_ULL(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & \
	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
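/*
 * Illustrative sketch (hypothetical register layout, not part of this
 * header): BIT() and GENMASK() are typically combined to describe and
 * extract hardware register fields.
 *
 *	#define CTRL_ENABLE	BIT(0)		// bit 0
 *	#define CTRL_MODE	GENMASK(3, 1)	// bits 3:1, i.e. 0xe
 *
 *	u32 ctrl = CTRL_ENABLE | (2 << 1);	// enabled, mode 2
 *	u32 mode = (ctrl & CTRL_MODE) >> 1;	// mode == 2
 */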
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}
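/*
 * Illustrative sketch (values chosen for this example only): rotation
 * wraps the shifted-out bits around to the other end, e.g.
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x80000001, 1) == 0xc0000000
 *
 * Note that, except for rol32(), these helpers assume 0 < shift < width;
 * a zero shift would shift by the full word size, which C leaves
 * undefined.
 */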
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * same as get_count_order(), but with a long type parameter
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	else if (l & (l - 1UL))
		return (int)fls_long(l);
	else
		return (int)fls_long(l) - 1;
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits)	\
({								\
	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
	typeof(*ptr) old, new;					\
								\
	do {							\
		old = READ_ONCE(*ptr);				\
		new = (old & ~mask) | bits;			\
	} while (cmpxchg(ptr, old, new) != old);		\
								\
	new;							\
})
#endif

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, _clear, _test)	\
({								\
	const typeof(*ptr) clear = (_clear), test = (_test);	\
	typeof(*ptr) old, new;					\
								\
	do {							\
		old = READ_ONCE(*ptr);				\
		new = old & ~clear;				\
	} while (!(old & test) &&				\
		 cmpxchg(ptr, old, new) != old);		\
								\
	!(old & test);						\
})
#endif

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif
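/*
 * Usage sketch for set_mask_bits() (illustrative only; the flag names
 * below are hypothetical, not part of any kernel API): atomically
 * replace a multi-bit field without disturbing neighbouring bits.
 *
 *	unsigned long state = OLD_FLAGS;
 *	set_mask_bits(&state, STATE_MASK, STATE_RUNNING);
 *
 * The cmpxchg() loop retries until the read-modify-write is applied
 * without a concurrent writer having changed *ptr in between.
 */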