/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/math.h>
#include <asm/div64.h>
#include <vdso/math64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
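
/*
 * Usage sketch (illustrative only, not part of this header): splitting a
 * nanosecond count into whole seconds and a leftover, assuming a
 * hypothetical local variable @ns and NSEC_PER_SEC from <linux/time64.h>.
 *
 *	u32 rem_ns;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem_ns);
 */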

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 *
 * Return: dividend / divisor
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
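
/*
 * Usage sketch (illustrative only): scaling a 64bit byte count down by a
 * 32bit block size. The names below are hypothetical locals, not part of
 * this header.
 *
 *	u64 nr_blocks = div_u64(nr_bytes, block_size);
 */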

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	u64 ret;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);
	return ret;
}
#endif /* mul_u64_u32_shr */
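
/*
 * Illustrative note (not from the original header): the fallback above
 * relies on the decomposition a = (ah << 32) + al, so that
 *
 *	(a * mul) >> shift == ((al * mul) >> shift)
 *			    + ((ah * mul) << (32 - shift))
 *
 * which only holds for shift values of at most 32, matching how callers
 * use this helper on 32bit configurations.
 */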

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
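
/*
 * Illustrative note (not from the original header): the fallback above is
 * plain schoolbook multiplication on 32bit digits. With a = ah:al and
 * b = bh:bl it accumulates
 *
 *	a * b = ((ah * bh) << 64) + ((ah * bl + al * bh) << 32) + (al * bl)
 *
 * into the 128-bit value rh:rl before applying the right shift.
 */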

#endif

#ifndef mul_s64_u64_shr
static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
	u64 ret;

	/*
	 * Extract the sign before the multiplication and put it back
	 * afterwards if needed.
	 */
	ret = mul_u64_u64_shr(abs(a), b, shift);

	if (a < 0)
		ret = -((s64) ret);

	return ret;
}
#endif /* mul_s64_u64_shr */
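
/*
 * Usage sketch (illustrative only): scaling a signed delta by a 32.32
 * fixed-point factor, assuming hypothetical locals @delta and @ratio.
 *
 *	s64 scaled = mul_s64_u64_shr(delta, ratio, 32);
 */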

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
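
/*
 * Illustrative note (not from the original header): mul_u64_u32_div()
 * computes (a * mul) / divisor without a 96bit intermediate by dividing
 * digit by digit on 32bit halves, the way schoolbook long division handles
 * the high digit first and feeds its remainder into the low digit. The
 * quotient is assumed to fit in 64 bits.
 */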
284381d585cSHaozhong Zhang
2853dc167baSOleg Nesterov u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
2863dc167baSOleg Nesterov
287090f13caSLiam Beguin /**
288090f13caSLiam Beguin * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
289090f13caSLiam Beguin * @ll: unsigned 64bit dividend
290090f13caSLiam Beguin * @d: unsigned 64bit divisor
291090f13caSLiam Beguin *
292090f13caSLiam Beguin * Divide unsigned 64bit dividend by unsigned 64bit divisor
293090f13caSLiam Beguin * and round up.
294090f13caSLiam Beguin *
295090f13caSLiam Beguin * Return: dividend / divisor rounded up
296090f13caSLiam Beguin */
29768600f62SRoman Gushchin #define DIV64_U64_ROUND_UP(ll, d) \
29868600f62SRoman Gushchin ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
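
/*
 * Worked example (illustrative only): DIV64_U64_ROUND_UP(10, 4) expands to
 * div64_u64(10 + 4 - 1, 4) == div64_u64(13, 4) == 3, i.e. 10/4 rounded up.
 * The divisor is evaluated once into _tmp, so an argument with side effects
 * is only expanded a single time.
 */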

/**
 * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */
#define DIV_U64_ROUND_UP(ll, d)		\
	({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })

/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })

/**
 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })

/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(	\
{							\
	s64 __x = (dividend);				\
	s32 __d = (divisor);				\
	((__x > 0) == (__d > 0)) ?			\
		div_s64((__x + (__d / 2)), __d) :	\
		div_s64((__x - (__d / 2)), __d);	\
}							\
)
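
/*
 * Worked example (illustrative only): DIV_S64_ROUND_CLOSEST(-7, 2) adjusts
 * the dividend away from zero because the signs differ, giving
 * div_s64(-7 - 1, 2) == -4, whereas plain truncating division would give -3.
 * Halves are therefore rounded away from zero for either sign.
 */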

/**
 * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
 * @x: the value to round up
 * @y: 32bit multiple to round up to
 *
 * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup() and
 * the faster round_up() for powers of 2.
 *
 * Return: rounded up value.
 */
static inline u64 roundup_u64(u64 x, u32 y)
{
	return DIV_U64_ROUND_UP(x, y) * y;
}
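
/*
 * Worked example (illustrative only): roundup_u64(1000, 300) returns 1200,
 * the smallest multiple of 300 that is not below 1000; a value that is
 * already a multiple of @y is returned unchanged.
 */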
#endif /* _LINUX_MATH64_H */