#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope.
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* Temporary */
#define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size)

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/*
 * Clearly slow versions of the hweightN() functions; their benefit is,
 * of course, compile-time evaluation of constant arguments.
 */
#define HWEIGHT8(w)					\
      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
	(!!((w) & (1ULL << 0))) +			\
	(!!((w) & (1ULL << 1))) +			\
	(!!((w) & (1ULL << 2))) +			\
	(!!((w) & (1ULL << 3))) +			\
	(!!((w) & (1ULL << 4))) +			\
	(!!((w) & (1ULL << 5))) +			\
	(!!((w) & (1ULL << 6))) +			\
	(!!((w) & (1ULL << 7)))	)

#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))

/*
 * Type invariant version that simply casts things to the
 * largest type.
 */
#define HWEIGHT(w)   HWEIGHT64((u64)(w))
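
/*
 * A quick usage sketch (the bitmap name 'pending' and handle_bit() are
 * placeholders, not kernel APIs): for_each_set_bit() walks every set
 * bit of a bitmap, and HWEIGHT() can population-count a constant mask
 * at compile time.
 *
 *	DECLARE_BITMAP(pending, 64);
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, pending, 64)
 *		handle_bit(bit);
 *
 *	BUILD_BUG_ON(HWEIGHT(0xf0f0UL) != 8);
 */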

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
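
/*
 * A quick usage sketch ('seed' and 'key' stand for caller-supplied
 * values): the rotate helpers are handy when mixing hash state, and
 * __ffs64() returns the index of the lowest set bit of a non-zero
 * 64-bit mask, e.g. __ffs64(0x0000000100000000ULL) == 32.
 *
 *	u32 h = ror32(seed, 7) ^ key;
 *	unsigned long bit = __ffs64(mask);
 */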

#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit.
 */
extern unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size);

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first cleared bit.
 */
extern unsigned long find_first_zero_bit(const unsigned long *addr,
					 unsigned long size);
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

#ifdef CONFIG_GENERIC_FIND_LAST_BIT
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif /* CONFIG_GENERIC_FIND_LAST_BIT */

#ifdef CONFIG_GENERIC_FIND_NEXT_BIT

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bit number to start searching at
 */
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bit number to start searching at
 */
extern unsigned long find_next_zero_bit(const unsigned long *addr,
					unsigned long size,
					unsigned long offset);

#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */
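
/*
 * A quick usage sketch ('slots' and MAX_SLOTS are placeholders): the
 * find_*_bit() helpers are the usual way to scan a bitmap, e.g. to
 * claim the first free slot in an allocation map.
 *
 *	unsigned long slot = find_first_zero_bit(slots, MAX_SLOTS);
 *
 *	if (slot < MAX_SLOTS)
 *		set_bit(slot, slots);
 *	else
 *		return -ENOSPC;
 */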