/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
#include <linux/bits.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use @bit as the value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use @bit as the value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
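
/*
 * Illustrative sketch, not part of the original header: walking the set
 * bits of a word with for_each_set_bit(). 'mask' and 'bit' are
 * hypothetical locals, and pr_info() is assumed to be available via
 * <linux/printk.h>.
 *
 *	unsigned long mask = 0x22UL;	(bits 1 and 5 set)
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, &mask, BITS_PER_LONG)
 *		pr_info("bit %u is set\n", bit);
 *
 * This prints "bit 1 is set" followed by "bit 5 is set". The _from
 * variants resume the walk at the current value of 'bit' instead of
 * restarting from bit 0.
 */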

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
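
/*
 * Illustrative sketch, not part of the original header: the rotate
 * helpers wrap the shifted-out bits around to the other end of the
 * word, for example:
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x80000001, 1) == 0xc0000000
 *
 * Masking the shift count (with 63/31/15/7 above) keeps shift == 0 well
 * defined: without it, the (-shift) term would request a shift by the
 * full word width, which is undefined behaviour in C.
 */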

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0 <= index < 32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0 <= index < 64) to sign bit
 */
static inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: the value to examine
 *
 * It is the same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	else if (l & (l - 1UL))
		return (int)fls_long(l);
	else
		return (int)fls_long(l) - 1;
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to assign
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}

#ifdef __KERNEL__

#ifndef set_mask_bits
/*
 * set_mask_bits() atomically replaces the @mask bits at @ptr with @bits
 * using a cmpxchg() retry loop and evaluates to the old value.
 */
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	old__;							\
})
#endif

#ifndef bit_clear_unless
/*
 * bit_clear_unless() atomically clears the @clear bits at @ptr unless any
 * of the @test bits are set, and evaluates to true if the clear happened.
 */
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */