#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
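
/*
 * Example (a sketch; NSEC_PER_SEC comes from <linux/time.h> and is an
 * assumption of this illustration, not part of this header):
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 *
 * secs receives ns / NSEC_PER_SEC and rem the leftover nanoseconds.
 */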

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}
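
/*
 * Example (a sketch; total_bytes and total_requests are hypothetical
 * u64 counters): use div64_u64() when the divisor itself may not fit
 * in 32 bits, so div_u64() cannot be used:
 *
 *	u64 avg = div64_u64(total_bytes, total_requests);
 */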

#elif BITS_PER_LONG == 32

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/* do_div() updates dividend in place and returns the remainder */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
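
/*
 * Note: the do_div() macro from <asm/div64.h> replaces its first
 * argument with the quotient and returns the remainder, which is why
 * the wrapper above simply returns the updated dividend.  A sketch:
 *
 *	u32 rem = do_div(n, 10);
 *
 * where n is a u64 lvalue; afterwards n holds the quotient and rem
 * the remainder.
 */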

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
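
/*
 * Example (a sketch; NSEC_PER_MSEC is from <linux/time.h>, assumed
 * available to the caller):
 *
 *	u64 ms = div_u64(ns, NSEC_PER_MSEC);
 */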

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif
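
/*
 * Note: as with C division, div_s64() truncates towards zero, so
 * div_s64(-7, 2) == -3 and div_s64_rem() leaves a remainder of -1.
 */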

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

/*
 * Division by repeated subtraction; only worthwhile when the quotient
 * is expected to be small.
 */
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
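
/*
 * Example (a sketch, mirroring how timespec normalization uses this):
 * the iterative variant pays off only when the quotient is expected
 * to be very small, since it costs one loop iteration per unit of
 * quotient:
 *
 *	u64 rem;
 *	u32 extra = __iter_div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 *
 * where nsec is a u64 nanosecond count known to be at most a few
 * seconds.
 */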

#endif /* _LINUX_MATH64_H */