#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
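
/*
 * Example (illustrative; ns is a hypothetical variable, NSEC_PER_SEC
 * comes from <linux/time.h>): split a nanosecond count into whole
 * seconds and leftover nanoseconds:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */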

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/* do_div() divides in place and returns the remainder */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

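/*
 * The remaining helpers are too big to inline on 32bit; generic
 * out-of-line versions are provided in lib/div64.c unless the
 * architecture supplies its own.
 */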
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
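
/*
 * Example (illustrative; total_bytes and nr_samples are hypothetical):
 *
 *	u64 avg = div_u64(total_bytes, nr_samples);
 *
 * On 32bit this avoids the cost of a full 64-by-64 division because
 * the divisor is only 32 bits wide.
 */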

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

/* Out-of-line version of __iter_div_u64_rem() below. */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

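/*
 * __iter_div_u64_rem() does one subtraction per unit of quotient, so it
 * is only a win when the quotient is known to be small. Illustrative
 * use (delta_ns is hypothetical):
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(delta_ns, NSEC_PER_SEC, &rem);
 */
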
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	/* 128bit intermediate: no overflow before the shift */
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	/*
	 * Split a into 32bit halves and shift each partial product
	 * separately; correct (and well-defined) only for shift <= 32.
	 */
	ret = ((u64)al * mul) >> shift;
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#endif
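
/*
 * Example (illustrative): scale a cycle count by a fixed-point
 * mult/shift pair, as clocksource-style conversions do:
 *
 *	u64 ns = mul_u64_u32_shr(cycles, mult, shift);
 */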

#endif /* _LINUX_MATH64_H */