/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
 */
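
/*
 * Illustrative use of do_div(): the macro divides a 64-bit variable in
 * place by a 32-bit divisor and evaluates to the 32-bit remainder, e.g.
 *
 *	u64 ns = 1000000123;
 *	u32 rem = do_div(ns, NSEC_PER_SEC);
 *
 * leaves ns == 1 and rem == 123 (NSEC_PER_SEC == 1000000000).
 * __div64_32() below is only reached when the upper 32 bits of the
 * dividend are non-zero; do_div() handles the 32-bit case inline.
 */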

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

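	/*
	 * Shift-and-subtract long division: double b (a copy of the
	 * divisor) and d (the quotient bit it represents) until b is
	 * no longer below rem or its top bit would be shifted out,
	 * which is what the (int64_t)b > 0 test guards against; then
	 * walk back down, subtracting b wherever it still fits and
	 * accumulating the corresponding quotient bits in res.
	 */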
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}

EXPORT_SYMBOL(__div64_32);

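/*
 * The signed helpers follow the C99 convention: quotients truncate
 * towards zero and the remainder carries the sign of the dividend.
 * That is why div_s64_rem() below divides the magnitudes and negates
 * only the remainder (and, when the signs differ, the quotient).
 */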
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found at the URL below and are available for use without
 * restriction.
 *
 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
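		/*
		 * Shift both operands right by n, the bit width of
		 * the divisor's upper half, so that the shifted
		 * divisor fits in 32 bits.  The estimate computed
		 * from the truncated operands is never too small and
		 * at most one too large; the unconditional decrement
		 * and the single remainder fix-up below then yield
		 * the exact quotient and remainder.
		 */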
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs64(dividend), abs64(divisor));
	t = (dividend ^ divisor) >> 63;

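	/*
	 * t is 0 when dividend and divisor have the same sign and ~0
	 * (all ones) when they differ, so (quot ^ t) - t leaves quot
	 * alone or takes its two's-complement negative, respectively.
	 */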
	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
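/*
 * __iter_div_u64_rem() (include/linux/math64.h) simply subtracts the
 * divisor in a loop, so this only pays off when the quotient is known
 * to be small, e.g. when timespec_add_ns() folds a few seconds' worth
 * of nanoseconds into a timespec.
 */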
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);