#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

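/*
 * Illustrative example (not part of the original header): splitting a
 * nanosecond count into whole seconds plus leftover nanoseconds. The
 * variable names are hypothetical; NSEC_PER_SEC is assumed to be
 * available from <linux/time.h>.
 *
 *	u32 ns_rem;
 *	u64 secs = div_u64_rem(timestamp_ns, NSEC_PER_SEC, &ns_rem);
 */
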
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

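/*
 * Note: as with C division, the quotient truncates toward zero, so the
 * remainder takes the sign of the dividend. A sketch with made-up
 * values:
 *
 *	s32 rem;
 *	s64 q = div_s64_rem(-7, 2, &rem);	(q == -3, rem == -1)
 */
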
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

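/*
 * Illustrative sketch of the do_div() semantics used above: do_div()
 * is a macro that divides in place; the dividend variable is
 * overwritten with the quotient, and the 32bit remainder is the
 * macro's value. Hypothetical numbers:
 *
 *	u64 n = 1000000007;
 *	u32 rem = do_div(n, 1000);	(now n == 1000000, rem == 7)
 */
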
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

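/*
 * Illustrative use (hypothetical names): computing an approximate
 * bytes-per-second rate from a u64 byte count and a u32 elapsed time
 * in milliseconds, assuming MSEC_PER_SEC from <linux/time.h>:
 *
 *	u64 rate = div_u64(bytes * MSEC_PER_SEC, elapsed_ms);
 */
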
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

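/*
 * Illustrative note: __iter_div_u64_rem() divides by repeated
 * subtraction, so it is only a win when the quotient is known to be
 * small (a few iterations at most); for anything else prefer
 * div_u64_rem(). A sketch with hypothetical names:
 *
 *	u64 ns_rem;
 *	u32 secs = __iter_div_u64_rem(delta_ns, NSEC_PER_SEC, &ns_rem);
 */
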
#endif /* _LINUX_MATH64_H */