#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))
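/*
 * div64_long()/div64_ul() divide a 64bit value by a native 'long' or
 * 'unsigned long'.  With 64bit longs this requires the full 64bit/64bit
 * helpers; the 32bit branch below maps the same macros onto the cheaper
 * 32bit-divisor variants.
 */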

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
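
/*
 * Illustrative example (not part of this header): splitting a nanosecond
 * count into seconds plus leftover nanoseconds, assuming 'ns' is a u64:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 *
 * NSEC_PER_SEC fits in 32 bits, so the 32bit-divisor variant is enough.
 */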

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
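	/*
	 * do_div() from asm/div64.h divides 'dividend' in place, leaving
	 * the quotient in 'dividend' and returning the 32bit remainder.
	 */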
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif
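
/*
 * The out-of-line fallbacks declared above are normally provided by
 * lib/div64.c on 32bit configurations.
 */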

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
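
/*
 * Illustrative example (not taken from the kernel tree): averaging with a
 * divisor known to fit in 32 bits would be written as
 *
 *	avg = div_u64(total, nr_samples);
 *
 * rather than 'total / nr_samples', so that 32bit kernels do not need a
 * generic 64bit division helper from libgcc.
 */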

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
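
/*
 * Note: __iter_div_u64_rem() divides by repeated subtraction, running one
 * loop iteration per unit of the quotient, so it is only sensible when the
 * quotient is expected to be small; otherwise use div_u64_rem().
 */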

#endif /* _LINUX_MATH64_H */