#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs as an optimized 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
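
/*
 * A minimal example of the semantics (illustrative only): like plain C
 * division, div_s64_rem() truncates the quotient towards zero and the
 * remainder takes the sign of the dividend, so
 *
 *	s32 rem;
 *	s64 quot = div_s64_rem(-7, 2, &rem);
 *
 * leaves quot == -3 and rem == -1.
 */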

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
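
/*
 * Note on the fallback above: do_div(n, base) from <asm/div64.h> divides
 * the 64-bit lvalue n in place by a 32-bit base and evaluates to the
 * 32-bit remainder, roughly:
 *
 *	remainder = n % base;
 *	n = n / base;		(n is updated in place)
 *
 * which is why the modified dividend can be returned directly as the
 * quotient. Beware that the macro may evaluate n more than once.
 */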

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
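
/*
 * A minimal usage sketch (illustrative only): scaling a 64bit nanosecond
 * count by a 32bit constant divisor, which is the pattern div_u64() is
 * optimized for:
 *
 *	u64 ns = 1234567890ULL;
 *	u64 ms = div_u64(ns, 1000000);	(ms == 1234)
 *
 * On 32bit architectures a plain "ns / 1000000" on u64 operands would
 * instead need the compiler's 64-by-64 division helper, which the kernel
 * deliberately does not provide.
 */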

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
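
/*
 * Note: __iter_div_u64_rem() divides by repeated subtraction, so its cost
 * grows linearly with the quotient. It only beats div_u64_rem() when the
 * quotient is known to be very small. An illustrative use (nsec is a
 * hypothetical caller variable, NSEC_PER_SEC the usual 1000000000L):
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 *
 * where nsec is expected to be at most a few seconds' worth of
 * nanoseconds.
 */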

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
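
/*
 * The fallback above splits a into 32-bit halves, a = (ah << 32) + al,
 * and relies on the identity
 *
 *	(a * mul) >> shift == ((al * mul) >> shift)
 *			      + ((ah * mul) << (32 - shift))
 *
 * which is exact because (ah * mul) << 32 has its low 32 bits clear, so
 * shifting the two partial products separately drops no carry. Note that
 * this only works for shift values up to 32 (a larger shift would make
 * 32 - shift negative); the __int128 variant above has no such limit.
 */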

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
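
/*
 * For reference, the schoolbook decomposition used above: with
 * a = (ah << 32) + al and b = (bh << 32) + bl,
 *
 *	a * b = (ah * bh) << 64		(rh)
 *	      + (ah * bl) << 32		(rn)
 *	      + (al * bh) << 32		(rm)
 *	      +  al * bl		(rl)
 *
 * The assignments through "c" merely propagate the carries while packing
 * this 128-bit product into the rh:rl pair before shifting.
 */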

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
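
/*
 * A minimal usage sketch (placeholder names, not defined in this header):
 * mul_u64_u32_div() computes (a * mul) / divisor without losing the high
 * bits of the up-to-96-bit intermediate product, assuming the final
 * quotient fits in 64 bits. For example, converting a cycle counter
 * running at tsc_khz kHz into nanoseconds:
 *
 *	u64 ns = mul_u64_u32_div(cycles, 1000000, tsc_khz);
 */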

#endif /* _LINUX_MATH64_H */