/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because those may be needed by architecture-specific static
 * inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm code and due
 * to that the compiler can't optimize them to compile-time expressions or
 * constants. By contrast, the generic_*() helpers are defined in pure C and
 * the compiler optimizes them well.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
 * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
 * the arguments can be resolved at compile time. That expression itself is a
 * constant and doesn't bring any functional changes to the rest of the cases.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const##op(nr, addr) : op(nr, addr))
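/*
 * Illustrative sketch of the effect described above (not part of the
 * original interface documentation): with a bitmap whose address and
 * contents are known at compile time, e.g.
 *
 *	unsigned long foo = 0;
 *
 *	__set_bit(5, &foo);
 *
 * the const##op() path (here const___set_bit()) is selected and the store
 * can be folded to the equivalent of `foo = BIT(5)`. When @nr or @addr is
 * only known at run time, the regular, possibly arch-specific op() variant
 * is used instead.
 */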
#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr)	bitop(_test_bit_acquire, nr, addr)

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(const_##name, generic_##name) &&	\
		      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
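/*
 * Illustrative example (not from the original kerneldoc): treating bit 7 as
 * the sign bit, sign_extend32(0xc0, 7) yields (__s32)0xffffffc0, i.e. -64,
 * while sign_extend32(0x40, 7) stays 0x40 because bit 7 is clear.
 */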
/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64-bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

/**
 * fns - find N'th set bit in a word
 * @word: The word to search
 * @n: Bit to find
 */
static inline unsigned long fns(unsigned long word, unsigned int n)
{
	while (word && n--)
		word &= word - 1;

	return word ? __ffs(word) : BITS_PER_LONG;
}
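/*
 * Illustrative example (not part of the original interface documentation):
 * for word == 0x0a (bits 1 and 3 set), fns(word, 0) == 1 and
 * fns(word, 1) == 3, while fns(word, 2) == BITS_PER_LONG because only two
 * bits are set.
 */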
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)					\
	({							\
		typecheck_pointer(*(addr));			\
		__set_bit(nr, (unsigned long *)(addr));		\
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		__clear_bit(nr, (unsigned long *)(addr));	\
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		test_bit(nr, (unsigned long *)(addr));		\
	})

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)					\
({									\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);		\
	typeof(*(ptr)) old__, new__;					\
									\
	old__ = READ_ONCE(*(ptr));					\
	do {								\
		new__ = (old__ & ~mask__) | bits__;			\
	} while (!try_cmpxchg(ptr, &old__, new__));			\
									\
	old__;								\
})
#endif

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)				\
({									\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);	\
	typeof(*(ptr)) old__, new__;					\
									\
	old__ = READ_ONCE(*(ptr));					\
	do {								\
		if (old__ & test__)					\
			break;						\
		new__ = old__ & ~clear__;				\
	} while (!try_cmpxchg(ptr, &old__, new__));			\
									\
	!(old__ & test__);						\
})
#endif

#endif /* __KERNEL__ */
#endif