// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */
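
/*
 * Illustrative sketch (not part of the original file): a typical do_div()
 * call site.  do_div() divides the 64-bit lvalue in place by a 32-bit
 * divisor and evaluates to the 32-bit remainder; the names below are
 * hypothetical.
 *
 *	u64 ns = 1000000007ULL;
 *	u32 rem = do_div(ns, 1000);	(afterwards ns == 1000000, rem == 7)
 */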

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

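	/*
	 * Double b (and the matching quotient bit in d) until b reaches
	 * the remaining dividend or bit 63 of b becomes set.
	 */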
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

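	/*
	 * Shift-and-subtract long division: walk b and d back down,
	 * subtracting b from the remainder and adding d to the result
	 * whenever the remainder is large enough.
	 */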
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	32bit divisor
 * @remainder:	32bit remainder
 */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
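
/*
 * Illustrative sketch (not from the original file) of the resulting sign
 * convention, e.g. in a hypothetical caller:
 *
 *	s32 rem;
 *	s64 q = div_s64_rem(-7, 2, &rem);	(q == -3, rem == -1)
 *
 * i.e. the quotient truncates toward zero and the remainder takes the
 * sign of the dividend, matching C's native '/' and '%'.
 */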

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
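		/*
		 * Shift both operands right by fls(high) bits so the
		 * divisor fits in 32 bits; the resulting quotient is an
		 * estimate, and the decrement/increment below corrects
		 * its possible off-by-one error.
		 */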
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
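
/*
 * Illustrative sketch (not from the original file): a hypothetical caller
 * splitting a byte count into whole blocks plus a leftover:
 *
 *	u64 rem;
 *	u64 blocks = div64_u64_rem(total_bytes, block_size, &rem);
 *
 * total_bytes and block_size are hypothetical u64 values; blocks receives
 * the quotient and rem the remainder.
 */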

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found here and are available for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
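
/*
 * Worked example of the estimate-and-correct step above (illustrative,
 * not from the original file): dividend = 2^33, divisor = 2^32 + 1, so
 * high = 1 and n = 1.  The estimate (2^33 >> 1) / ((2^32 + 1) >> 1) =
 * 2^32 / 2^31 = 2 is one larger than the true quotient 1; the decrement
 * yields 1, and since 2^33 - 1 * (2^32 + 1) = 2^32 - 1 is less than the
 * divisor, no increment follows.
 */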

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
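	/*
	 * t is 0 when dividend and divisor have the same sign and -1
	 * (all ones) when they differ; (quot ^ t) - t then leaves quot
	 * unchanged or negates it accordingly.
	 */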
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
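
/*
 * Illustrative example (not from the original file):
 * div64_s64(-10, 3) returns -3, matching C's truncating division.
 */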

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
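
/*
 * Illustrative sketch (not from the original file): __iter_div_u64_rem()
 * divides by repeated subtraction, so this helper only pays off when the
 * quotient is expected to be small, e.g. in a hypothetical caller:
 *
 *	u64 rem;
 *	u32 secs = iter_div_u64_rem(delta_ns, NSEC_PER_SEC, &rem);
 *
 * where delta_ns is a hypothetical nanosecond delta covering at most a
 * few seconds.
 */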