#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * 32bit archs commonly provide an optimized implementation of this
 * 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
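/*
 * Usage sketch (added commentary, not part of the upstream header): the
 * remainder variant lets a single division produce both quotient and
 * remainder, e.g. splitting a nanosecond count into whole seconds plus
 * leftover nanoseconds:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, 1000000000, &rem);
 *
 * The divisor 1000000000 fits in 32 bits, which is what makes the
 * 64/32 variant applicable.
 */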

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/*
	 * do_div() divides its u64 argument in place and returns the
	 * remainder, so dividend holds the quotient afterwards.
	 */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
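/*
 * Usage sketch (added commentary, not part of the upstream header): on
 * 32bit builds a plain `/' on a u64 dividend would pull in the
 * compiler's generic 64-by-64 division helper, which kernel builds
 * typically do not link against. When the divisor is known to fit in
 * 32 bits, spell the division explicitly:
 *
 *	u64 usecs = div_u64(nsecs, 1000);
 *
 * so the arch-optimized 64-by-32 path is used instead.
 */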

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
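/*
 * Usage sketch (added commentary, not part of the upstream header): the
 * iterative form costs one subtraction per unit of quotient, so it only
 * pays off when the quotient is known to be very small, e.g. normalizing
 * a seconds/nanoseconds pair after adding a short delta:
 *
 *	u64 ns_rem;
 *
 *	sec += __iter_div_u64_rem(nsec, 1000000000, &ns_rem);
 *	nsec = ns_rem;
 *
 * For an unbounded quotient, div_u64_rem() above is the right tool.
 */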

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
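/*
 * Note (added commentary, not part of the upstream header):
 * mul_u64_u32_shr() computes (a * mul) >> shift with the intermediate
 * product held in 128 bits, so the multiply cannot overflow as long as
 * the final, shifted result fits in 64 bits. This is the usual
 * fixed-point scaling step, e.g. a clocksource-style cycles-to-nanoseconds
 * conversion (mult and shift here stand for a precomputed scale factor):
 *
 *	ns = mul_u64_u32_shr(cycles, mult, shift);
 */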

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	/* Split a into 32bit halves: a = ((u64)ah << 32) + al. */
	al = a;
	ah = a >> 32;

	ret = ((u64)al * mul) >> shift;
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
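/*
 * Derivation sketch (added commentary, not part of the upstream header):
 * writing a = ((u64)ah << 32) + al, and assuming shift <= 32,
 *
 *	(a * mul) >> shift
 *		== ((al * mul) >> shift) + ((ah * mul) << (32 - shift))
 *
 * because the high partial product ah * mul * 2^32, shifted right by
 * shift, equals ah * mul shifted left by (32 - shift) exactly, with no
 * low-order bits discarded. A shift larger than 32 would make the
 * left-shift count negative, so this fallback assumes shift <= 32.
 */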

#endif

#endif /* _LINUX_MATH64_H */