/* MN10300 64-bit division
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_DIV64
#define _ASM_DIV64

#include <linux/types.h>

extern void ____unhandled_size_in_do_div___(void);
18/*
Richard Henderson5a4b65a2011-03-23 17:42:49 +000019 * Beginning with gcc 4.6, the MDR register is represented explicitly. We
20 * must, therefore, at least explicitly clobber the register when we make
21 * changes to it. The following assembly fragments *could* be rearranged in
22 * order to leave the moves to/from the MDR register to the compiler, but the
23 * gains would be minimal at best.
24 */
25#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
26# define CLOBBER_MDR_CC "mdr", "cc"
27#else
28# define CLOBBER_MDR_CC "cc"
29#endif
30
31/*
David Howellsb920de12008-02-08 04:19:31 -080032 * divide n by base, leaving the result in n and returning the remainder
33 * - we can do this quite efficiently on the MN10300 by cascading the divides
34 * through the MDR register
35 */
36#define do_div(n, base) \
37({ \
38 unsigned __rem = 0; \
39 if (sizeof(n) <= 4) { \
40 asm("mov %1,mdr \n" \
41 "divu %2,%0 \n" \
42 "mov mdr,%1 \n" \
43 : "+r"(n), "=d"(__rem) \
44 : "r"(base), "1"(__rem) \
Richard Henderson5a4b65a2011-03-23 17:42:49 +000045 : CLOBBER_MDR_CC \
David Howellsb920de12008-02-08 04:19:31 -080046 ); \
47 } else if (sizeof(n) <= 8) { \
48 union { \
49 unsigned long long l; \
50 u32 w[2]; \
51 } __quot; \
52 __quot.l = n; \
53 asm("mov %0,mdr \n" /* MDR = 0 */ \
54 "divu %3,%1 \n" \
55 /* __quot.MSL = __div.MSL / base, */ \
56 /* MDR = MDR:__div.MSL % base */ \
57 "divu %3,%2 \n" \
58 /* __quot.LSL = MDR:__div.LSL / base, */ \
59 /* MDR = MDR:__div.LSL % base */ \
60 "mov mdr,%0 \n" \
61 : "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0]) \
62 : "r"(base), "0"(__rem), "1"(__quot.w[1]), \
63 "2"(__quot.w[0]) \
Richard Henderson5a4b65a2011-03-23 17:42:49 +000064 : CLOBBER_MDR_CC \
David Howellsb920de12008-02-08 04:19:31 -080065 ); \
66 n = __quot.l; \
67 } else { \
68 ____unhandled_size_in_do_div___(); \
69 } \
70 __rem; \
71})
72
73/*
74 * do an unsigned 32-bit multiply and divide with intermediate 64-bit product
75 * so as not to lose accuracy
76 * - we use the MDR register to hold the MSW of the product
77 */
78static inline __attribute__((const))
79unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
80{
81 unsigned result;
82
83 asm("mulu %2,%0 \n" /* MDR:val = val*mult */
84 "divu %3,%0 \n" /* val = MDR:val/div;
85 * MDR = MDR:val%div */
86 : "=r"(result)
87 : "0"(val), "ir"(mult), "r"(div)
Richard Henderson5a4b65a2011-03-23 17:42:49 +000088 : CLOBBER_MDR_CC
David Howellsb920de12008-02-08 04:19:31 -080089 );
90
91 return result;
92}
93
94/*
95 * do a signed 32-bit multiply and divide with intermediate 64-bit product so
96 * as not to lose accuracy
97 * - we use the MDR register to hold the MSW of the product
98 */
99static inline __attribute__((const))
100signed __muldiv64s(signed val, signed mult, signed div)
101{
102 signed result;
103
104 asm("mul %2,%0 \n" /* MDR:val = val*mult */
105 "div %3,%0 \n" /* val = MDR:val/div;
106 * MDR = MDR:val%div */
107 : "=r"(result)
108 : "0"(val), "ir"(mult), "r"(div)
Richard Henderson5a4b65a2011-03-23 17:42:49 +0000109 : CLOBBER_MDR_CC
David Howellsb920de12008-02-08 04:19:31 -0800110 );
111
112 return result;
113}
114
#endif /* _ASM_DIV64 */