#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * 32bit archs commonly provide an optimized implementation of this
 * 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

#endif /* _LINUX_MATH64_H */
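
/*
 * Usage sketch (illustrative, not part of the header): the typical way
 * to divide a 64bit value by a 32bit constant.  A plain "/" on a u64
 * would pull in the slow libgcc __udivdi3 helper on 32bit archs, while
 * div_u64()/div_u64_rem() route through the optimized do_div() path.
 * The function name below is hypothetical.
 */
static inline u64 example_ns_to_usec(u64 ns)
{
	u32 rem;

	/* quotient in the return value, ns % 1000 left in rem */
	return div_u64_rem(ns, 1000, &rem);
}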
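
/*
 * Sign-behaviour sketch (illustrative): C99 division truncates toward
 * zero, so the signed variants yield a remainder with the sign of the
 * dividend.  The function name below is hypothetical.
 */
static inline s64 example_signed_div(void)
{
	s32 rem;
	s64 q = div_s64_rem(-7, 2, &rem);	/* q == -3, rem == -1 */

	return q;
}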
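
/*
 * Usage sketch for __iter_div_u64_rem() (illustrative): it suits
 * callers that expect a small quotient, e.g. normalizing a nanosecond
 * field that rarely exceeds a few seconds, where repeated subtraction
 * beats a full 64bit divide.  The function name is hypothetical;
 * 1000000000 is nanoseconds per second.
 */
static inline void example_normalize_ns(u64 *sec, u64 *nsec)
{
	u64 rem;

	*sec += __iter_div_u64_rem(*nsec, 1000000000, &rem);
	*nsec = rem;
}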