#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * This is commonly provided by 32bit archs to supply an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
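
/*
 * Example (editorial sketch, not in the upstream header): splitting a
 * nanosecond count into whole seconds and leftover nanoseconds; the
 * constant 1000000000 stands in for NSEC_PER_SEC.
 *
 *      u32 rem;
 *      u64 secs = div_u64_rem(5500000000ULL, 1000000000, &rem);
 *      // secs == 5, rem == 500000000
 */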

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}
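
/*
 * Example (editorial sketch, variable names illustrative): a full
 * 64bit/64bit divide, e.g. expressing one large counter as a percentage
 * of another:
 *
 *      u64 pct = div64_u64(done_bytes * 100, total_bytes);
 *
 * Prefer div_u64() when the divisor is known to fit in 32 bits; it is
 * cheaper on 32bit architectures.
 */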

#elif BITS_PER_LONG == 32

#ifndef div_u64_rem
/* Fall back on do_div() unless the arch provides its own div_u64_rem(). */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = do_div(dividend, divisor);
        return dividend;
}
#endif
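
/*
 * Editorial note: do_div(n, base) is a macro that divides the 64bit value
 * n by the 32bit value base in place -- n becomes the quotient -- while
 * the macro expression itself evaluates to the remainder. That is why
 * div_u64_rem() above can simply return dividend after the call.
 */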

#ifndef div_s64_rem
/* Out-of-line 32bit fallback, implemented in lib/div64.c. */
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
/* Out-of-line 32bit fallback, implemented in lib/div64.c. */
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}
#endif
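
/*
 * Illustrative helper (editorial sketch, not part of the upstream header;
 * the name is hypothetical): converting nanoseconds to milliseconds is a
 * typical div_u64() use, since the divisor fits comfortably in 32 bits.
 */
static inline u64 example_ns_to_ms(u64 ns)
{
        /* 1000000 nanoseconds per millisecond */
        return div_u64(ns, 1000000);
}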

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;
        return div_s64_rem(dividend, divisor, &remainder);
}
#endif
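
/*
 * Illustrative helper (editorial sketch, name hypothetical): like C
 * division, div_s64() truncates toward zero, so div_s64(-7, 2) == -3
 * with a remainder of -1.
 */
static inline s64 example_avg_delta(s64 sum, s32 nr_samples)
{
        return div_s64(sum, nr_samples);
}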

/*
 * Out-of-line version of __iter_div_u64_rem() for callers that should
 * not inline the loop.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation.
                 */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}
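
/*
 * Illustrative helper (editorial sketch, name hypothetical):
 * __iter_div_u64_rem() only wins when the quotient is known to be small,
 * e.g. normalizing a nanosecond value that overshoots one second by at
 * most a few seconds; 1000000000 stands in for NSEC_PER_SEC.
 */
static inline u32 example_normalize_ns(u64 ns, u64 *rem_ns)
{
        /* Typically iterates only a handful of times. */
        return __iter_div_u64_rem(ns, 1000000000, rem_ns);
}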

#endif /* _LINUX_MATH64_H */