1/*
2 * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28
29#include "mpdecimal.h"
30#include <stdio.h>
31#include <stdlib.h>
32#include <string.h>
33#include <limits.h>
34#include <math.h>
35#include "basearith.h"
36#include "bits.h"
37#include "convolute.h"
38#include "crt.h"
39#include "errno.h"
40#include "memory.h"
41#include "typearith.h"
42#include "umodarith.h"
43
44#ifdef PPRO
45 #if defined(_MSC_VER)
46 #include <float.h>
47 #pragma fenv_access(on)
48 #elif !defined(__OpenBSD__) && !defined(__NetBSD__)
49 /* C99 */
50 #include <fenv.h>
51 #pragma STDC FENV_ACCESS ON
52 #endif
53#endif
54
55#if defined(__x86_64__) && defined(__GLIBC__) && !defined(__INTEL_COMPILER)
56 #define USE_80BIT_LONG_DOUBLE
57#endif
58
59#if defined(_MSC_VER)
60 #define ALWAYS_INLINE __forceinline
61#elif defined(LEGACY_COMPILER)
62 #define ALWAYS_INLINE
63 #undef inline
64 #define inline
65#else
66 #ifdef TEST_COVERAGE
67 #define ALWAYS_INLINE
68 #else
69 #define ALWAYS_INLINE inline __attribute__ ((always_inline))
70 #endif
71#endif
72
73
74#define MPD_NEWTONDIV_CUTOFF 1024L
75
76#define MPD_NEW_STATIC(name, flags, exp, digits, len) \
77 mpd_uint_t name##_data[MPD_MINALLOC_MAX]; \
78 mpd_t name = {flags|MPD_STATIC|MPD_STATIC_DATA, exp, digits, \
79 len, MPD_MINALLOC_MAX, name##_data}
80
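/*
 * Usage sketch (illustrative, not taken from a specific call site): writing
 * MPD_NEW_STATIC(tmp, 0, 0, 1, 1) inside a function declares an mpd_t named
 * 'tmp' whose coefficient lives in the array tmp_data[MPD_MINALLOC_MAX], so
 * no heap allocation is needed. The coefficient words are left uninitialized
 * and must be set before the value is read.
 */
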
81#define MPD_NEW_CONST(name, flags, exp, digits, len, alloc, initval) \
82 mpd_uint_t name##_data[alloc] = {initval}; \
83 mpd_t name = {flags|MPD_STATIC|MPD_CONST_DATA, exp, digits, \
84 len, alloc, name##_data}
85
86#define MPD_NEW_SHARED(name, a) \
87 mpd_t name = {(a->flags&~MPD_DATAFLAGS)|MPD_STATIC|MPD_SHARED_DATA, \
88 a->exp, a->digits, a->len, a->alloc, a->data}
89
90
91static mpd_uint_t data_one[1] = {1};
92static mpd_uint_t data_zero[1] = {0};
93static const mpd_t one = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_one};
94static const mpd_t minus_one = {MPD_NEG|MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1,
95 data_one};
96static const mpd_t zero = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_zero};
97
98static inline void _mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx,
99 uint32_t *status);
100static void _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a,
101 mpd_ssize_t exp);
102static inline mpd_ssize_t _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size);
103
104static void _mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
105 const mpd_context_t *ctx, uint32_t *status);
106static inline void _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
107 const mpd_context_t *ctx, uint32_t *status);
108static void _mpd_base_ndivmod(mpd_t *q, mpd_t *r, const mpd_t *a,
109 const mpd_t *b, uint32_t *status);
110static inline void _mpd_qpow_uint(mpd_t *result, const mpd_t *base,
111 mpd_uint_t exp, uint8_t resultsign,
112 const mpd_context_t *ctx, uint32_t *status);
113
114mpd_uint_t mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n);
115
116
117/******************************************************************************/
118/* Performance critical inline functions */
119/******************************************************************************/
120
121#ifdef CONFIG_64
122/* Digits in a word, primarily useful for the most significant word. */
123ALWAYS_INLINE int
124mpd_word_digits(mpd_uint_t word)
125{
126 if (word < mpd_pow10[9]) {
127 if (word < mpd_pow10[4]) {
128 if (word < mpd_pow10[2]) {
129 return (word < mpd_pow10[1]) ? 1 : 2;
130 }
131 return (word < mpd_pow10[3]) ? 3 : 4;
132 }
133 if (word < mpd_pow10[6]) {
134 return (word < mpd_pow10[5]) ? 5 : 6;
135 }
136 if (word < mpd_pow10[8]) {
137 return (word < mpd_pow10[7]) ? 7 : 8;
138 }
139 return 9;
140 }
141 if (word < mpd_pow10[14]) {
142 if (word < mpd_pow10[11]) {
143 return (word < mpd_pow10[10]) ? 10 : 11;
144 }
145 if (word < mpd_pow10[13]) {
146 return (word < mpd_pow10[12]) ? 12 : 13;
147 }
148 return 14;
149 }
150 if (word < mpd_pow10[18]) {
151 if (word < mpd_pow10[16]) {
152 return (word < mpd_pow10[15]) ? 15 : 16;
153 }
154 return (word < mpd_pow10[17]) ? 17 : 18;
155 }
156
157 return (word < mpd_pow10[19]) ? 19 : 20;
158}
159#else
160ALWAYS_INLINE int
161mpd_word_digits(mpd_uint_t word)
162{
163 if (word < mpd_pow10[4]) {
164 if (word < mpd_pow10[2]) {
165 return (word < mpd_pow10[1]) ? 1 : 2;
166 }
167 return (word < mpd_pow10[3]) ? 3 : 4;
168 }
169 if (word < mpd_pow10[6]) {
170 return (word < mpd_pow10[5]) ? 5 : 6;
171 }
172 if (word < mpd_pow10[8]) {
173 return (word < mpd_pow10[7]) ? 7 : 8;
174 }
175
176 return (word < mpd_pow10[9]) ? 9 : 10;
177}
178#endif
179
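/*
 * Example (illustrative): mpd_word_digits(0) == 1, mpd_word_digits(9) == 1,
 * mpd_word_digits(10) == 2 and mpd_word_digits(1000) == 4. The nested
 * comparisons above perform a branch-based binary search over mpd_pow10[].
 */
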
180
181/* Adjusted exponent */
182ALWAYS_INLINE mpd_ssize_t
183mpd_adjexp(const mpd_t *dec)
184{
185 return (dec->exp + dec->digits) - 1;
186}
187
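/*
 * Example (illustrative): 1.23E+5 is stored as coefficient 123 with
 * digits == 3 and exp == 3, so mpd_adjexp() returns (3+3)-1 == 5, the
 * exponent of the most significant digit.
 */
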
188/* Etiny */
189ALWAYS_INLINE mpd_ssize_t
190mpd_etiny(const mpd_context_t *ctx)
191{
192 return ctx->emin - (ctx->prec - 1);
193}
194
195/* Etop: used for folding down in IEEE clamping */
196ALWAYS_INLINE mpd_ssize_t
197mpd_etop(const mpd_context_t *ctx)
198{
199 return ctx->emax - (ctx->prec - 1);
200}
201
202/* Most significant word */
203ALWAYS_INLINE mpd_uint_t
204mpd_msword(const mpd_t *dec)
205{
206 assert(dec->len > 0);
207 return dec->data[dec->len-1];
208}
209
210/* Most significant digit of a word */
211inline mpd_uint_t
212mpd_msd(mpd_uint_t word)
213{
214 int n;
215
216 n = mpd_word_digits(word);
217 return word / mpd_pow10[n-1];
218}
219
220/* Least significant digit of a word */
221ALWAYS_INLINE mpd_uint_t
222mpd_lsd(mpd_uint_t word)
223{
224 return word % 10;
225}
226
227/* Coefficient size needed to store 'digits' */
228ALWAYS_INLINE mpd_ssize_t
229mpd_digits_to_size(mpd_ssize_t digits)
230{
231 mpd_ssize_t q, r;
232
233 _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
234 return (r == 0) ? q : q+1;
235}
236
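/*
 * Example (illustrative, assuming a 64-bit build where MPD_RDIGITS == 19):
 * 19 digits fit into a single word, while 20 digits need two words, because
 * mpd_digits_to_size() rounds the quotient up whenever there is a remainder.
 */
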
237/* Number of digits in the exponent. Not defined for MPD_SSIZE_MIN. */
238inline int
239mpd_exp_digits(mpd_ssize_t exp)
240{
241 exp = (exp < 0) ? -exp : exp;
242 return mpd_word_digits(exp);
243}
244
245/* Canonical */
246ALWAYS_INLINE int
247mpd_iscanonical(const mpd_t *dec UNUSED)
248{
249 return 1;
250}
251
252/* Finite */
253ALWAYS_INLINE int
254mpd_isfinite(const mpd_t *dec)
255{
256 return !(dec->flags & MPD_SPECIAL);
257}
258
259/* Infinite */
260ALWAYS_INLINE int
261mpd_isinfinite(const mpd_t *dec)
262{
263 return dec->flags & MPD_INF;
264}
265
266/* NaN */
267ALWAYS_INLINE int
268mpd_isnan(const mpd_t *dec)
269{
270 return dec->flags & (MPD_NAN|MPD_SNAN);
271}
272
273/* Negative */
274ALWAYS_INLINE int
275mpd_isnegative(const mpd_t *dec)
276{
277 return dec->flags & MPD_NEG;
278}
279
280/* Positive */
281ALWAYS_INLINE int
282mpd_ispositive(const mpd_t *dec)
283{
284 return !(dec->flags & MPD_NEG);
285}
286
287/* qNaN */
288ALWAYS_INLINE int
289mpd_isqnan(const mpd_t *dec)
290{
291 return dec->flags & MPD_NAN;
292}
293
294/* Signed */
295ALWAYS_INLINE int
296mpd_issigned(const mpd_t *dec)
297{
298 return dec->flags & MPD_NEG;
299}
300
301/* sNaN */
302ALWAYS_INLINE int
303mpd_issnan(const mpd_t *dec)
304{
305 return dec->flags & MPD_SNAN;
306}
307
308/* Special */
309ALWAYS_INLINE int
310mpd_isspecial(const mpd_t *dec)
311{
312 return dec->flags & MPD_SPECIAL;
313}
314
315/* Zero */
316ALWAYS_INLINE int
317mpd_iszero(const mpd_t *dec)
318{
319 return !mpd_isspecial(dec) && mpd_msword(dec) == 0;
320}
321
322/* Test for zero when specials have been ruled out already */
323ALWAYS_INLINE int
324mpd_iszerocoeff(const mpd_t *dec)
325{
326 return mpd_msword(dec) == 0;
327}
328
329/* Normal */
330inline int
331mpd_isnormal(const mpd_t *dec, const mpd_context_t *ctx)
332{
333 if (mpd_isspecial(dec)) return 0;
334 if (mpd_iszerocoeff(dec)) return 0;
335
336 return mpd_adjexp(dec) >= ctx->emin;
337}
338
339/* Subnormal */
340inline int
341mpd_issubnormal(const mpd_t *dec, const mpd_context_t *ctx)
342{
343 if (mpd_isspecial(dec)) return 0;
344 if (mpd_iszerocoeff(dec)) return 0;
345
346 return mpd_adjexp(dec) < ctx->emin;
347}
348
349/* Odd word */
350ALWAYS_INLINE int
351mpd_isoddword(mpd_uint_t word)
352{
353 return word & 1;
354}
355
356/* Odd coefficient */
357ALWAYS_INLINE int
358mpd_isoddcoeff(const mpd_t *dec)
359{
360 return mpd_isoddword(dec->data[0]);
361}
362
363/* 0 if dec is positive, 1 if dec is negative */
364ALWAYS_INLINE uint8_t
365mpd_sign(const mpd_t *dec)
366{
367 return dec->flags & MPD_NEG;
368}
369
370/* 1 if dec is positive, -1 if dec is negative */
371ALWAYS_INLINE int
372mpd_arith_sign(const mpd_t *dec)
373{
374 return 1 - 2 * mpd_isnegative(dec);
375}
376
377/* Radix */
378ALWAYS_INLINE long
379mpd_radix(void)
380{
381 return 10;
382}
383
384/* Dynamic decimal */
385ALWAYS_INLINE int
386mpd_isdynamic(mpd_t *dec)
387{
388 return !(dec->flags & MPD_STATIC);
389}
390
391/* Static decimal */
392ALWAYS_INLINE int
393mpd_isstatic(mpd_t *dec)
394{
395 return dec->flags & MPD_STATIC;
396}
397
398/* Data of decimal is dynamic */
399ALWAYS_INLINE int
400mpd_isdynamic_data(mpd_t *dec)
401{
402 return !(dec->flags & MPD_DATAFLAGS);
403}
404
405/* Data of decimal is static */
406ALWAYS_INLINE int
407mpd_isstatic_data(mpd_t *dec)
408{
409 return dec->flags & MPD_STATIC_DATA;
410}
411
412/* Data of decimal is shared */
413ALWAYS_INLINE int
414mpd_isshared_data(mpd_t *dec)
415{
416 return dec->flags & MPD_SHARED_DATA;
417}
418
419/* Data of decimal is const */
420ALWAYS_INLINE int
421mpd_isconst_data(mpd_t *dec)
422{
423 return dec->flags & MPD_CONST_DATA;
424}
425
426
427/******************************************************************************/
428/* Inline memory handling */
429/******************************************************************************/
430
431/* Fill destination with zeros */
432ALWAYS_INLINE void
433mpd_uint_zero(mpd_uint_t *dest, mpd_size_t len)
434{
435 mpd_size_t i;
436
437 for (i = 0; i < len; i++) {
438 dest[i] = 0;
439 }
440}
441
442/* Free a decimal */
443ALWAYS_INLINE void
444mpd_del(mpd_t *dec)
445{
446 if (mpd_isdynamic_data(dec)) {
447 mpd_free(dec->data);
448 }
449 if (mpd_isdynamic(dec)) {
450 mpd_free(dec);
451 }
452}
453
454/*
455 * Resize the coefficient. Existing data up to 'nwords' is left untouched.
456 * Return 1 on success, 0 otherwise.
457 *
458 * Input invariant: MPD_MINALLOC <= result->alloc.
459 *
460 * Case nwords == result->alloc:
461 * 'result' is unchanged. Return 1.
462 *
463 * Case nwords > result->alloc:
464 * Case realloc success:
465 * The value of 'result' does not change. Return 1.
466 * Case realloc failure:
467 * 'result' is NaN, status is updated with MPD_Malloc_error. Return 0.
468 *
469 * Case nwords < result->alloc:
470 * Case is_static_data or realloc failure [1]:
471 * 'result' is unchanged. Return 1.
472 * Case realloc success:
473 * The value of result is undefined (expected). Return 1.
474 *
475 *
476 * [1] In that case the old (now oversized) area is still valid.
477 */
478ALWAYS_INLINE int
479mpd_qresize(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
480{
481 assert(!mpd_isconst_data(result)); /* illegal operation for a const */
482 assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
483 assert(MPD_MINALLOC <= result->alloc);
484
485 nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
486 if (nwords == result->alloc) {
487 return 1;
488 }
489 if (mpd_isstatic_data(result)) {
490 if (nwords > result->alloc) {
491 return mpd_switch_to_dyn(result, nwords, status);
492 }
493 return 1;
494 }
495
496 return mpd_realloc_dyn(result, nwords, status);
497}
498
499/* Same as mpd_qresize, but the complete coefficient (including the old
500 * memory area!) is initialized to zero. */
501ALWAYS_INLINE int
502mpd_qresize_zero(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
503{
504 assert(!mpd_isconst_data(result)); /* illegal operation for a const */
505 assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
506 assert(MPD_MINALLOC <= result->alloc);
507
508 nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
509 if (nwords != result->alloc) {
510 if (mpd_isstatic_data(result)) {
511 if (nwords > result->alloc) {
512 return mpd_switch_to_dyn_zero(result, nwords, status);
513 }
514 }
515 else if (!mpd_realloc_dyn(result, nwords, status)) {
516 return 0;
517 }
518 }
519
520 mpd_uint_zero(result->data, nwords);
521 return 1;
522}
523
524/*
525 * Reduce memory size for the coefficient to MPD_MINALLOC. In theory,
526 * realloc may fail even when reducing the memory size. But in that case
527 * the old memory area is always big enough, so checking for MPD_Malloc_error
528 * is not imperative.
529 */
530ALWAYS_INLINE void
531mpd_minalloc(mpd_t *result)
532{
533 assert(!mpd_isconst_data(result)); /* illegal operation for a const */
534 assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
535
536 if (!mpd_isstatic_data(result) && result->alloc > MPD_MINALLOC) {
537 uint8_t err = 0;
538 result->data = mpd_realloc(result->data, MPD_MINALLOC,
539 sizeof *result->data, &err);
540 if (!err) {
541 result->alloc = MPD_MINALLOC;
542 }
543 }
544}
545
546int
547mpd_resize(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
548{
549 uint32_t status = 0;
550 if (!mpd_qresize(result, nwords, &status)) {
551 mpd_addstatus_raise(ctx, status);
552 return 0;
553 }
554 return 1;
555}
556
557int
558mpd_resize_zero(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
559{
560 uint32_t status = 0;
561 if (!mpd_qresize_zero(result, nwords, &status)) {
562 mpd_addstatus_raise(ctx, status);
563 return 0;
564 }
565 return 1;
566}
567
568
569/******************************************************************************/
570/* Set attributes of a decimal */
571/******************************************************************************/
572
573/* Set digits. Assumption: result->len is initialized and > 0. */
574inline void
575mpd_setdigits(mpd_t *result)
576{
577 mpd_ssize_t wdigits = mpd_word_digits(mpd_msword(result));
578 result->digits = wdigits + (result->len-1) * MPD_RDIGITS;
579}
580
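/*
 * Example (illustrative, assuming MPD_RDIGITS == 19 on a 64-bit build): a
 * coefficient stored in two words whose most significant word is 45 has
 * digits == 2 + 1*19 == 21.
 */
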
581/* Set sign */
582ALWAYS_INLINE void
583mpd_set_sign(mpd_t *result, uint8_t sign)
584{
585 result->flags &= ~MPD_NEG;
586 result->flags |= sign;
587}
588
589/* Copy sign from another decimal */
590ALWAYS_INLINE void
591mpd_signcpy(mpd_t *result, mpd_t *a)
592{
593 uint8_t sign = a->flags&MPD_NEG;
594
595 result->flags &= ~MPD_NEG;
596 result->flags |= sign;
597}
598
599/* Set infinity */
600ALWAYS_INLINE void
601mpd_set_infinity(mpd_t *result)
602{
603 result->flags &= ~MPD_SPECIAL;
604 result->flags |= MPD_INF;
605}
606
607/* Set qNaN */
608ALWAYS_INLINE void
609mpd_set_qnan(mpd_t *result)
610{
611 result->flags &= ~MPD_SPECIAL;
612 result->flags |= MPD_NAN;
613}
614
615/* Set sNaN */
616ALWAYS_INLINE void
617mpd_set_snan(mpd_t *result)
618{
619 result->flags &= ~MPD_SPECIAL;
620 result->flags |= MPD_SNAN;
621}
622
623/* Set to negative */
624ALWAYS_INLINE void
625mpd_set_negative(mpd_t *result)
626{
627 result->flags |= MPD_NEG;
628}
629
630/* Set to positive */
631ALWAYS_INLINE void
632mpd_set_positive(mpd_t *result)
633{
634 result->flags &= ~MPD_NEG;
635}
636
637/* Set to dynamic */
638ALWAYS_INLINE void
639mpd_set_dynamic(mpd_t *result)
640{
641 result->flags &= ~MPD_STATIC;
642}
643
644/* Set to static */
645ALWAYS_INLINE void
646mpd_set_static(mpd_t *result)
647{
648 result->flags |= MPD_STATIC;
649}
650
651/* Set data to dynamic */
652ALWAYS_INLINE void
653mpd_set_dynamic_data(mpd_t *result)
654{
655 result->flags &= ~MPD_DATAFLAGS;
656}
657
658/* Set data to static */
659ALWAYS_INLINE void
660mpd_set_static_data(mpd_t *result)
661{
662 result->flags &= ~MPD_DATAFLAGS;
663 result->flags |= MPD_STATIC_DATA;
664}
665
666/* Set data to shared */
667ALWAYS_INLINE void
668mpd_set_shared_data(mpd_t *result)
669{
670 result->flags &= ~MPD_DATAFLAGS;
671 result->flags |= MPD_SHARED_DATA;
672}
673
674/* Set data to const */
675ALWAYS_INLINE void
676mpd_set_const_data(mpd_t *result)
677{
678 result->flags &= ~MPD_DATAFLAGS;
679 result->flags |= MPD_CONST_DATA;
680}
681
682/* Clear flags, preserving memory attributes. */
683ALWAYS_INLINE void
684mpd_clear_flags(mpd_t *result)
685{
686 result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
687}
688
689/* Set flags, preserving memory attributes. */
690ALWAYS_INLINE void
691mpd_set_flags(mpd_t *result, uint8_t flags)
692{
693 result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
694 result->flags |= flags;
695}
696
697/* Copy flags, preserving memory attributes of result. */
698ALWAYS_INLINE void
699mpd_copy_flags(mpd_t *result, const mpd_t *a)
700{
701 uint8_t aflags = a->flags;
702 result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
703 result->flags |= (aflags & ~(MPD_STATIC|MPD_DATAFLAGS));
704}
705
706/* Initialize a workcontext from ctx. Set traps, flags and newtrap to 0. */
707static inline void
708mpd_workcontext(mpd_context_t *workctx, const mpd_context_t *ctx)
709{
710 workctx->prec = ctx->prec;
711 workctx->emax = ctx->emax;
712 workctx->emin = ctx->emin;
713 workctx->round = ctx->round;
714 workctx->traps = 0;
715 workctx->status = 0;
716 workctx->newtrap = 0;
717 workctx->clamp = ctx->clamp;
718 workctx->allcr = ctx->allcr;
719}
720
721
722/******************************************************************************/
723/* Getting and setting parts of decimals */
724/******************************************************************************/
725
726/* Flip the sign of a decimal */
727static inline void
728_mpd_negate(mpd_t *dec)
729{
730 dec->flags ^= MPD_NEG;
731}
732
733/* Set coefficient to zero */
734void
735mpd_zerocoeff(mpd_t *result)
736{
737 mpd_minalloc(result);
738 result->digits = 1;
739 result->len = 1;
740 result->data[0] = 0;
741}
742
743/* Set the coefficient to all nines. */
744void
745mpd_qmaxcoeff(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
746{
747 mpd_ssize_t len, r;
748
749 _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
750 len = (r == 0) ? len : len+1;
751
752 if (!mpd_qresize(result, len, status)) {
753 return;
754 }
755
756 result->len = len;
757 result->digits = ctx->prec;
758
759 --len;
760 if (r > 0) {
761 result->data[len--] = mpd_pow10[r]-1;
762 }
763 for (; len >= 0; --len) {
764 result->data[len] = MPD_RADIX-1;
765 }
766}
767
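/*
 * Example (illustrative, assuming MPD_RDIGITS == 19 and MPD_RADIX == 10^19):
 * for ctx->prec == 25, mpd_qmaxcoeff() produces len == 2 with
 * data[1] == 10^6 - 1 and data[0] == 10^19 - 1, i.e. twenty-five nines.
 */
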
768/*
769 * Cut off the most significant digits so that the rest fits in ctx->prec.
770 * Cannot fail.
771 */
772static void
773_mpd_cap(mpd_t *result, const mpd_context_t *ctx)
774{
775 uint32_t dummy;
776 mpd_ssize_t len, r;
777
778 if (result->len > 0 && result->digits > ctx->prec) {
779 _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
780 len = (r == 0) ? len : len+1;
781
782 if (r != 0) {
783 result->data[len-1] %= mpd_pow10[r];
784 }
785
786 len = _mpd_real_size(result->data, len);
787 /* resize to fewer words cannot fail */
788 mpd_qresize(result, len, &dummy);
789 result->len = len;
790 mpd_setdigits(result);
791 }
792 if (mpd_iszero(result)) {
793 _settriple(result, mpd_sign(result), 0, result->exp);
794 }
795}
796
797/*
798 * Cut off the most significant digits of a NaN payload so that the rest
799 * fits in ctx->prec - ctx->clamp. Cannot fail.
800 */
801static void
802_mpd_fix_nan(mpd_t *result, const mpd_context_t *ctx)
803{
804 uint32_t dummy;
805 mpd_ssize_t prec;
806 mpd_ssize_t len, r;
807
808 prec = ctx->prec - ctx->clamp;
809 if (result->len > 0 && result->digits > prec) {
810 if (prec == 0) {
811 mpd_minalloc(result);
812 result->len = result->digits = 0;
813 }
814 else {
815 _mpd_idiv_word(&len, &r, prec, MPD_RDIGITS);
816 len = (r == 0) ? len : len+1;
817
818 if (r != 0) {
819 result->data[len-1] %= mpd_pow10[r];
820 }
821
822 len = _mpd_real_size(result->data, len);
823 /* resize to fewer words cannot fail */
824 mpd_qresize(result, len, &dummy);
825 result->len = len;
826 mpd_setdigits(result);
827 if (mpd_iszerocoeff(result)) {
828 /* NaN0 is not a valid representation */
829 result->len = result->digits = 0;
830 }
831 }
832 }
833}
834
835/*
836 * Get n most significant digits from a decimal, where 0 < n <= MPD_UINT_DIGITS.
837 * Assumes MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for 32 and 64 bit
838 * machines.
839 *
840 * The result of the operation will be in lo. If the operation is impossible,
841 * hi will be nonzero. This is used to indicate an error.
842 */
843static inline void
844_mpd_get_msdigits(mpd_uint_t *hi, mpd_uint_t *lo, const mpd_t *dec,
845 unsigned int n)
846{
847 mpd_uint_t r, tmp;
848
849 assert(0 < n && n <= MPD_RDIGITS+1);
850
851 _mpd_div_word(&tmp, &r, dec->digits, MPD_RDIGITS);
852 r = (r == 0) ? MPD_RDIGITS : r; /* digits in the most significant word */
853
854 *hi = 0;
855 *lo = dec->data[dec->len-1];
856 if (n <= r) {
857 *lo /= mpd_pow10[r-n];
858 }
859 else if (dec->len > 1) {
860 /* at this point 1 <= r < n <= MPD_RDIGITS+1 */
861 _mpd_mul_words(hi, lo, *lo, mpd_pow10[n-r]);
862 tmp = dec->data[dec->len-2] / mpd_pow10[MPD_RDIGITS-(n-r)];
863 *lo = *lo + tmp;
864 if (*lo < tmp) (*hi)++;
865 }
866}
867
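/*
 * Example (illustrative, assuming MPD_RDIGITS == 19): for a 25-digit
 * coefficient the most significant word holds 6 digits, so a request for the
 * 10 most significant digits multiplies that word by 10^4 and adds the top
 * 4 digits of the second most significant word.
 */
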
868
869/******************************************************************************/
870/* Gathering information about a decimal */
871/******************************************************************************/
872
873/* The real size of the coefficient without leading zero words. */
874static inline mpd_ssize_t
875_mpd_real_size(mpd_uint_t *data, mpd_ssize_t size)
876{
877 while (size > 1 && data[size-1] == 0) {
878 size--;
879 }
880
881 return size;
882}
883
884/* Return number of trailing zeros. No errors are possible. */
885mpd_ssize_t
886mpd_trail_zeros(const mpd_t *dec)
887{
888 mpd_uint_t word;
889 mpd_ssize_t i, tz = 0;
890
891 for (i=0; i < dec->len; ++i) {
892 if (dec->data[i] != 0) {
893 word = dec->data[i];
894 tz = i * MPD_RDIGITS;
895 while (word % 10 == 0) {
896 word /= 10;
897 tz++;
898 }
899 break;
900 }
901 }
902
903 return tz;
904}
905
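/*
 * Example (illustrative): a coefficient of 1230000 yields
 * mpd_trail_zeros() == 4. The loop skips zero words in steps of MPD_RDIGITS
 * and then counts the remaining trailing zeros digit by digit.
 */
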
906/* Integer: Undefined for specials */
907static int
908_mpd_isint(const mpd_t *dec)
909{
910 mpd_ssize_t tz;
911
912 if (mpd_iszerocoeff(dec)) {
913 return 1;
914 }
915
916 tz = mpd_trail_zeros(dec);
917 return (dec->exp + tz >= 0);
918}
919
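/*
 * Example (illustrative): 1.23E+5 (coefficient 123, exp 3) has no trailing
 * zeros and 3 + 0 >= 0, so it is an integer; 1.23 (coefficient 123, exp -2)
 * is not, since -2 + 0 < 0.
 */
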
920/* Integer */
921int
922mpd_isinteger(const mpd_t *dec)
923{
924 if (mpd_isspecial(dec)) {
925 return 0;
926 }
927 return _mpd_isint(dec);
928}
929
930/* Word is a power of 10 */
931static int
932mpd_word_ispow10(mpd_uint_t word)
933{
934 int n;
935
936 n = mpd_word_digits(word);
937 if (word == mpd_pow10[n-1]) {
938 return 1;
939 }
940
941 return 0;
942}
943
944/* Coefficient is a power of 10 */
945static int
946mpd_coeff_ispow10(const mpd_t *dec)
947{
948 if (mpd_word_ispow10(mpd_msword(dec))) {
949 if (_mpd_isallzero(dec->data, dec->len-1)) {
950 return 1;
951 }
952 }
953
954 return 0;
955}
956
957/* All digits of a word are nines */
958static int
959mpd_word_isallnine(mpd_uint_t word)
960{
961 int n;
962
963 n = mpd_word_digits(word);
964 if (word == mpd_pow10[n]-1) {
965 return 1;
966 }
967
968 return 0;
969}
970
971/* All digits of the coefficient are nines */
972static int
973mpd_coeff_isallnine(const mpd_t *dec)
974{
975 if (mpd_word_isallnine(mpd_msword(dec))) {
976 if (_mpd_isallnine(dec->data, dec->len-1)) {
977 return 1;
978 }
979 }
980
981 return 0;
982}
983
984/* Odd decimal: Undefined for non-integers! */
985int
986mpd_isodd(const mpd_t *dec)
987{
988 mpd_uint_t q, r;
989 assert(mpd_isinteger(dec));
990 if (mpd_iszerocoeff(dec)) return 0;
991 if (dec->exp < 0) {
992 _mpd_div_word(&q, &r, -dec->exp, MPD_RDIGITS);
993 q = dec->data[q] / mpd_pow10[r];
994 return mpd_isoddword(q);
995 }
996 return dec->exp == 0 && mpd_isoddword(dec->data[0]);
997}
998
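/*
 * Example (illustrative): the integer 30 stored as coefficient 3 with
 * exp == 1 is reported as even, because the final test requires exp == 0.
 * 12.30E+1 (coefficient 1230, exp -1) represents 123; the exp < 0 branch
 * divides out the fractional trailing zero and tests the digit 3, odd.
 */
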
999/* Even: Undefined for non-integers! */
1000int
1001mpd_iseven(const mpd_t *dec)
1002{
1003 return !mpd_isodd(dec);
1004}
1005
1006/******************************************************************************/
1007/* Getting and setting decimals */
1008/******************************************************************************/
1009
1010/* Internal function: Set a static decimal from a triple, no error checking. */
1011static void
1012_ssettriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
1013{
1014 mpd_set_flags(result, sign);
1015 result->exp = exp;
1016 _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
1017 result->len = (result->data[1] == 0) ? 1 : 2;
1018 mpd_setdigits(result);
1019}
1020
1021/* Internal function: Set a decimal from a triple, no error checking. */
1022static void
1023_settriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
1024{
1025 mpd_minalloc(result);
1026 mpd_set_flags(result, sign);
1027 result->exp = exp;
1028 _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
1029 result->len = (result->data[1] == 0) ? 1 : 2;
1030 mpd_setdigits(result);
1031}
1032
1033/* Set a special number from a triple */
1034void
1035mpd_setspecial(mpd_t *result, uint8_t sign, uint8_t type)
1036{
1037 mpd_minalloc(result);
1038 result->flags &= ~(MPD_NEG|MPD_SPECIAL);
1039 result->flags |= (sign|type);
1040 result->exp = result->digits = result->len = 0;
1041}
1042
1043/* Set result of NaN with an error status */
1044void
1045mpd_seterror(mpd_t *result, uint32_t flags, uint32_t *status)
1046{
1047 mpd_minalloc(result);
1048 mpd_set_qnan(result);
1049 mpd_set_positive(result);
1050 result->exp = result->digits = result->len = 0;
1051 *status |= flags;
1052}
1053
1054/* quietly set a static decimal from an mpd_ssize_t */
1055void
1056mpd_qsset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
1057 uint32_t *status)
1058{
1059 mpd_uint_t u;
1060 uint8_t sign = MPD_POS;
1061
1062 if (a < 0) {
1063 if (a == MPD_SSIZE_MIN) {
1064 u = (mpd_uint_t)MPD_SSIZE_MAX +
1065 (-(MPD_SSIZE_MIN+MPD_SSIZE_MAX));
1066 }
1067 else {
1068 u = -a;
1069 }
1070 sign = MPD_NEG;
1071 }
1072 else {
1073 u = a;
1074 }
1075 _ssettriple(result, sign, u, 0);
1076 mpd_qfinalize(result, ctx, status);
1077}
1078
1079/* quietly set a static decimal from an mpd_uint_t */
1080void
1081mpd_qsset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
1082 uint32_t *status)
1083{
1084 _ssettriple(result, MPD_POS, a, 0);
1085 mpd_qfinalize(result, ctx, status);
1086}
1087
1088/* quietly set a static decimal from an int32_t */
1089void
1090mpd_qsset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
1091 uint32_t *status)
1092{
1093 mpd_qsset_ssize(result, a, ctx, status);
1094}
1095
1096/* quietly set a static decimal from a uint32_t */
1097void
1098mpd_qsset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
1099 uint32_t *status)
1100{
1101 mpd_qsset_uint(result, a, ctx, status);
1102}
1103
1104#ifdef CONFIG_64
1105/* quietly set a static decimal from an int64_t */
1106void
1107mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
1108 uint32_t *status)
1109{
1110 mpd_qsset_ssize(result, a, ctx, status);
1111}
1112
1113/* quietly set a static decimal from a uint64_t */
1114void
1115mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
1116 uint32_t *status)
1117{
1118 mpd_qsset_uint(result, a, ctx, status);
1119}
1120#endif
1121
1122/* quietly set a decimal from an mpd_ssize_t */
1123void
1124mpd_qset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
1125 uint32_t *status)
1126{
1127 mpd_minalloc(result);
1128 mpd_qsset_ssize(result, a, ctx, status);
1129}
1130
1131/* quietly set a decimal from an mpd_uint_t */
1132void
1133mpd_qset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
1134 uint32_t *status)
1135{
1136 _settriple(result, MPD_POS, a, 0);
1137 mpd_qfinalize(result, ctx, status);
1138}
1139
1140/* quietly set a decimal from an int32_t */
1141void
1142mpd_qset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
1143 uint32_t *status)
1144{
1145 mpd_qset_ssize(result, a, ctx, status);
1146}
1147
1148/* quietly set a decimal from a uint32_t */
1149void
1150mpd_qset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
1151 uint32_t *status)
1152{
1153 mpd_qset_uint(result, a, ctx, status);
1154}
1155
1156#if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
1157/* set a decimal from a uint64_t */
1158static void
1159_c32setu64(mpd_t *result, uint64_t u, uint8_t sign, uint32_t *status)
1160{
1161 mpd_uint_t w[3];
1162 uint64_t q;
1163 int i, len;
1164
1165 len = 0;
1166 do {
1167 q = u / MPD_RADIX;
1168 w[len] = (mpd_uint_t)(u - q * MPD_RADIX);
1169 u = q; len++;
1170 } while (u != 0);
1171
1172 if (!mpd_qresize(result, len, status)) {
1173 return;
1174 }
1175 for (i = 0; i < len; i++) {
1176 result->data[i] = w[i];
1177 }
1178
1179 mpd_set_sign(result, sign);
1180 result->exp = 0;
1181 result->len = len;
1182 mpd_setdigits(result);
1183}
1184
1185static void
1186_c32_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
1187 uint32_t *status)
1188{
1189 _c32setu64(result, a, MPD_POS, status);
1190 mpd_qfinalize(result, ctx, status);
1191}
1192
1193/* set a decimal from an int64_t */
1194static void
1195_c32_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
1196 uint32_t *status)
1197{
1198 uint64_t u;
1199 uint8_t sign = MPD_POS;
1200
1201 if (a < 0) {
1202 if (a == INT64_MIN) {
1203 u = (uint64_t)INT64_MAX + (-(INT64_MIN+INT64_MAX));
1204 }
1205 else {
1206 u = -a;
1207 }
1208 sign = MPD_NEG;
1209 }
1210 else {
1211 u = a;
1212 }
1213 _c32setu64(result, u, sign, status);
1214 mpd_qfinalize(result, ctx, status);
1215}
1216#endif /* CONFIG_32 && !LEGACY_COMPILER */
1217
1218#ifndef LEGACY_COMPILER
1219/* quietly set a decimal from an int64_t */
1220void
1221mpd_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
1222 uint32_t *status)
1223{
1224#ifdef CONFIG_64
1225 mpd_qset_ssize(result, a, ctx, status);
1226#else
1227 _c32_qset_i64(result, a, ctx, status);
1228#endif
1229}
1230
1231/* quietly set a decimal from a uint64_t */
1232void
1233mpd_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
1234 uint32_t *status)
1235{
1236#ifdef CONFIG_64
1237 mpd_qset_uint(result, a, ctx, status);
1238#else
1239 _c32_qset_u64(result, a, ctx, status);
1240#endif
1241}
1242#endif /* !LEGACY_COMPILER */
1243
1244
1245/*
1246 * Quietly get an mpd_uint_t from a decimal. Assumes
1247 * MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for
1248 * 32 and 64 bit machines.
1249 *
1250 * If the operation is impossible, MPD_Invalid_operation is set.
1251 */
1252static mpd_uint_t
1253_mpd_qget_uint(int use_sign, const mpd_t *a, uint32_t *status)
1254{
1255 mpd_t tmp;
1256 mpd_uint_t tmp_data[2];
1257 mpd_uint_t lo, hi;
1258
1259 if (mpd_isspecial(a)) {
1260 *status |= MPD_Invalid_operation;
1261 return MPD_UINT_MAX;
1262 }
1263 if (mpd_iszero(a)) {
1264 return 0;
1265 }
1266 if (use_sign && mpd_isnegative(a)) {
1267 *status |= MPD_Invalid_operation;
1268 return MPD_UINT_MAX;
1269 }
1270
1271 if (a->digits+a->exp > MPD_RDIGITS+1) {
1272 *status |= MPD_Invalid_operation;
1273 return MPD_UINT_MAX;
1274 }
1275
1276 if (a->exp < 0) {
1277 if (!_mpd_isint(a)) {
1278 *status |= MPD_Invalid_operation;
1279 return MPD_UINT_MAX;
1280 }
1281 /* At this point a->digits+a->exp <= MPD_RDIGITS+1,
1282 * so the shift fits. */
1283 tmp.data = tmp_data;
1284 tmp.flags = MPD_STATIC|MPD_STATIC_DATA;
1285 tmp.alloc = 2;
1286 mpd_qsshiftr(&tmp, a, -a->exp);
1287 tmp.exp = 0;
1288 a = &tmp;
1289 }
1290
1291 _mpd_get_msdigits(&hi, &lo, a, MPD_RDIGITS+1);
1292 if (hi) {
1293 *status |= MPD_Invalid_operation;
1294 return MPD_UINT_MAX;
1295 }
1296
1297 if (a->exp > 0) {
1298 _mpd_mul_words(&hi, &lo, lo, mpd_pow10[a->exp]);
1299 if (hi) {
1300 *status |= MPD_Invalid_operation;
1301 return MPD_UINT_MAX;
1302 }
1303 }
1304
1305 return lo;
1306}
1307
1308/*
1309 * Sets Invalid_operation for:
1310 * - specials
1311 * - negative numbers (except negative zero)
1312 * - non-integers
1313 * - overflow
1314 */
1315mpd_uint_t
1316mpd_qget_uint(const mpd_t *a, uint32_t *status)
1317{
1318 return _mpd_qget_uint(1, a, status);
1319}
1320
1321/* Same as above, but gets the absolute value, i.e. the sign is ignored. */
1322mpd_uint_t
1323mpd_qabs_uint(const mpd_t *a, uint32_t *status)
1324{
1325 return _mpd_qget_uint(0, a, status);
1326}
1327
1328/* quietly get an mpd_ssize_t from a decimal */
1329mpd_ssize_t
1330mpd_qget_ssize(const mpd_t *a, uint32_t *status)
1331{
1332 mpd_uint_t u;
1333 int isneg;
1334
1335 u = mpd_qabs_uint(a, status);
1336 if (*status&MPD_Invalid_operation) {
1337 return MPD_SSIZE_MAX;
1338 }
1339
1340 isneg = mpd_isnegative(a);
1341 if (u <= MPD_SSIZE_MAX) {
1342 return isneg ? -((mpd_ssize_t)u) : (mpd_ssize_t)u;
1343 }
1344 else if (isneg && u+(MPD_SSIZE_MIN+MPD_SSIZE_MAX) == MPD_SSIZE_MAX) {
1345 return MPD_SSIZE_MIN;
1346 }
1347
1348 *status |= MPD_Invalid_operation;
1349 return MPD_SSIZE_MAX;
1350}
1351
1352#ifdef CONFIG_64
1353/* quietly get a uint64_t from a decimal */
1354uint64_t
1355mpd_qget_u64(const mpd_t *a, uint32_t *status)
1356{
1357 return mpd_qget_uint(a, status);
1358}
1359
1360/* quietly get an int64_t from a decimal */
1361int64_t
1362mpd_qget_i64(const mpd_t *a, uint32_t *status)
1363{
1364 return mpd_qget_ssize(a, status);
1365}
1366#else
1367/* quietly get a uint32_t from a decimal */
1368uint32_t
1369mpd_qget_u32(const mpd_t *a, uint32_t *status)
1370{
1371 return mpd_qget_uint(a, status);
1372}
1373
1374/* quietly get an int32_t from a decimal */
1375int32_t
1376mpd_qget_i32(const mpd_t *a, uint32_t *status)
1377{
1378 return mpd_qget_ssize(a, status);
1379}
1380#endif
1381
1382
1383/******************************************************************************/
1384/* Filtering input of functions, finalizing output of functions */
1385/******************************************************************************/
1386
1387/*
1388 * Check if the operand is NaN, copy to result and return 1 if this is
1389 * the case. Copying can fail since NaNs are allowed to have a payload that
1390 * does not fit in MPD_MINALLOC.
1391 */
1392int
1393mpd_qcheck_nan(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
1394 uint32_t *status)
1395{
1396 if (mpd_isnan(a)) {
1397 *status |= mpd_issnan(a) ? MPD_Invalid_operation : 0;
1398 mpd_qcopy(result, a, status);
1399 mpd_set_qnan(result);
1400 _mpd_fix_nan(result, ctx);
1401 return 1;
1402 }
1403 return 0;
1404}
1405
1406/*
1407 * Check if either operand is NaN, copy to result and return 1 if this
1408 * is the case. Copying can fail since NaNs are allowed to have a payload
1409 * that does not fit in MPD_MINALLOC.
1410 */
1411int
1412mpd_qcheck_nans(mpd_t *result, const mpd_t *a, const mpd_t *b,
1413 const mpd_context_t *ctx, uint32_t *status)
1414{
1415 if ((a->flags|b->flags)&(MPD_NAN|MPD_SNAN)) {
1416 const mpd_t *choice = b;
1417 if (mpd_issnan(a)) {
1418 choice = a;
1419 *status |= MPD_Invalid_operation;
1420 }
1421 else if (mpd_issnan(b)) {
1422 *status |= MPD_Invalid_operation;
1423 }
1424 else if (mpd_isqnan(a)) {
1425 choice = a;
1426 }
1427 mpd_qcopy(result, choice, status);
1428 mpd_set_qnan(result);
1429 _mpd_fix_nan(result, ctx);
1430 return 1;
1431 }
1432 return 0;
1433}
1434
1435/*
1436 * Check if one of the operands is NaN, copy to result and return 1 if this
1437 * is the case. Copying can fail since NaNs are allowed to have a payload
1438 * that does not fit in MPD_MINALLOC.
1439 */
1440static int
1441mpd_qcheck_3nans(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
1442 const mpd_context_t *ctx, uint32_t *status)
1443{
1444 if ((a->flags|b->flags|c->flags)&(MPD_NAN|MPD_SNAN)) {
1445 const mpd_t *choice = c;
1446 if (mpd_issnan(a)) {
1447 choice = a;
1448 *status |= MPD_Invalid_operation;
1449 }
1450 else if (mpd_issnan(b)) {
1451 choice = b;
1452 *status |= MPD_Invalid_operation;
1453 }
1454 else if (mpd_issnan(c)) {
1455 *status |= MPD_Invalid_operation;
1456 }
1457 else if (mpd_isqnan(a)) {
1458 choice = a;
1459 }
1460 else if (mpd_isqnan(b)) {
1461 choice = b;
1462 }
1463 mpd_qcopy(result, choice, status);
1464 mpd_set_qnan(result);
1465 _mpd_fix_nan(result, ctx);
1466 return 1;
1467 }
1468 return 0;
1469}
1470
1471/* Check if rounding digit 'rnd' leads to an increment. */
1472static inline int
1473_mpd_rnd_incr(const mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx)
1474{
1475 int ld;
1476
1477 switch (ctx->round) {
1478 case MPD_ROUND_DOWN: case MPD_ROUND_TRUNC:
1479 return 0;
1480 case MPD_ROUND_HALF_UP:
1481 return (rnd >= 5);
1482 case MPD_ROUND_HALF_EVEN:
1483 return (rnd > 5) || ((rnd == 5) && mpd_isoddcoeff(dec));
1484 case MPD_ROUND_CEILING:
1485 return !(rnd == 0 || mpd_isnegative(dec));
1486 case MPD_ROUND_FLOOR:
1487 return !(rnd == 0 || mpd_ispositive(dec));
1488 case MPD_ROUND_HALF_DOWN:
1489 return (rnd > 5);
1490 case MPD_ROUND_UP:
1491 return !(rnd == 0);
1492 case MPD_ROUND_05UP:
1493 ld = (int)mpd_lsd(dec->data[0]);
1494 return (!(rnd == 0) && (ld == 0 || ld == 5));
1495 default:
1496 /* Without a valid context, further results will be undefined. */
1497 return 0; /* GCOV_NOT_REACHED */
1498 }
1499}
1500
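/*
 * Example (illustrative): with MPD_ROUND_HALF_EVEN, a rounding digit of 5
 * increments the coefficient only if the current least significant digit is
 * odd, e.g. ...25 rounds down to ...2 while ...35 rounds up to ...4.
 */
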
1501/*
1502 * Apply rounding to a decimal that has been right-shifted into a full
1503 * precision decimal. If an increment leads to an overflow of the precision,
1504 * adjust the coefficient and the exponent and check the new exponent for
1505 * overflow.
1506 */
1507static inline void
1508_mpd_apply_round(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
1509 uint32_t *status)
1510{
1511 if (_mpd_rnd_incr(dec, rnd, ctx)) {
1512 /* We have a number with exactly ctx->prec digits. The increment
1513 * can only lead to an overflow if the decimal is all nines. In
1514 * that case, the result is a power of ten with prec+1 digits.
1515 *
1516 * If the precision is a multiple of MPD_RDIGITS, this situation is
1517 * detected by _mpd_baseincr returning a carry.
1518 * If the precision is not a multiple of MPD_RDIGITS, we have to
1519 * check if the result has one digit too many.
1520 */
1521 mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
1522 if (carry) {
1523 dec->data[dec->len-1] = mpd_pow10[MPD_RDIGITS-1];
1524 dec->exp += 1;
1525 _mpd_check_exp(dec, ctx, status);
1526 return;
1527 }
1528 mpd_setdigits(dec);
1529 if (dec->digits > ctx->prec) {
1530 mpd_qshiftr_inplace(dec, 1);
1531 dec->exp += 1;
1532 dec->digits = ctx->prec;
1533 _mpd_check_exp(dec, ctx, status);
1534 }
1535 }
1536}
1537
1538/*
1539 * Apply rounding to a decimal. Allow overflow of the precision.
1540 */
1541static inline void
1542_mpd_apply_round_excess(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
1543 uint32_t *status)
1544{
1545 if (_mpd_rnd_incr(dec, rnd, ctx)) {
1546 mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
1547 if (carry) {
1548 if (!mpd_qresize(dec, dec->len+1, status)) {
1549 return;
1550 }
1551 dec->data[dec->len] = 1;
1552 dec->len += 1;
1553 }
1554 mpd_setdigits(dec);
1555 }
1556}
1557
1558/*
1559 * Apply rounding to a decimal that has been right-shifted into a decimal
1560 * with full precision or less. Return failure if an increment would
1561 * overflow the precision.
1562 */
1563static inline int
1564_mpd_apply_round_fit(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
1565 uint32_t *status)
1566{
1567 if (_mpd_rnd_incr(dec, rnd, ctx)) {
1568 mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
1569 if (carry) {
1570 if (!mpd_qresize(dec, dec->len+1, status)) {
1571 return 0;
1572 }
1573 dec->data[dec->len] = 1;
1574 dec->len += 1;
1575 }
1576 mpd_setdigits(dec);
1577 if (dec->digits > ctx->prec) {
1578 mpd_seterror(dec, MPD_Invalid_operation, status);
1579 return 0;
1580 }
1581 }
1582 return 1;
1583}
1584
1585/* Check a normal number for overflow, underflow, clamping. If the operand
1586 is modified, it will be zero, special or (sub)normal with a coefficient
1587 that fits into the current context precision. */
1588static inline void
1589_mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
1590{
1591 mpd_ssize_t adjexp, etiny, shift;
1592 int rnd;
1593
1594 adjexp = mpd_adjexp(dec);
1595 if (adjexp > ctx->emax) {
1596
1597 if (mpd_iszerocoeff(dec)) {
1598 dec->exp = ctx->emax;
1599 if (ctx->clamp) {
1600 dec->exp -= (ctx->prec-1);
1601 }
1602 mpd_zerocoeff(dec);
1603 *status |= MPD_Clamped;
1604 return;
1605 }
1606
1607 switch (ctx->round) {
1608 case MPD_ROUND_HALF_UP: case MPD_ROUND_HALF_EVEN:
1609 case MPD_ROUND_HALF_DOWN: case MPD_ROUND_UP:
1610 case MPD_ROUND_TRUNC:
1611 mpd_setspecial(dec, mpd_sign(dec), MPD_INF);
1612 break;
1613 case MPD_ROUND_DOWN: case MPD_ROUND_05UP:
1614 mpd_qmaxcoeff(dec, ctx, status);
1615 dec->exp = ctx->emax - ctx->prec + 1;
1616 break;
1617 case MPD_ROUND_CEILING:
1618 if (mpd_isnegative(dec)) {
1619 mpd_qmaxcoeff(dec, ctx, status);
1620 dec->exp = ctx->emax - ctx->prec + 1;
1621 }
1622 else {
1623 mpd_setspecial(dec, MPD_POS, MPD_INF);
1624 }
1625 break;
1626 case MPD_ROUND_FLOOR:
1627 if (mpd_ispositive(dec)) {
1628 mpd_qmaxcoeff(dec, ctx, status);
1629 dec->exp = ctx->emax - ctx->prec + 1;
1630 }
1631 else {
1632 mpd_setspecial(dec, MPD_NEG, MPD_INF);
1633 }
1634 break;
1635 default: /* debug */
1636 abort(); /* GCOV_NOT_REACHED */
1637 }
1638
1639 *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
1640
1641 } /* fold down */
1642 else if (ctx->clamp && dec->exp > mpd_etop(ctx)) {
1643 /* At this point adjexp=exp+digits-1 <= emax and exp > etop=emax-prec+1:
1644 * (1) shift = exp -emax+prec-1 > 0
1645 * (2) digits+shift = exp+digits-1 - emax + prec <= prec */
1646 shift = dec->exp - mpd_etop(ctx);
1647 if (!mpd_qshiftl(dec, dec, shift, status)) {
1648 return;
1649 }
1650 dec->exp -= shift;
1651 *status |= MPD_Clamped;
1652 if (!mpd_iszerocoeff(dec) && adjexp < ctx->emin) {
1653 /* Underflow is impossible, since exp < etiny=emin-prec+1
1654 * and exp > etop=emax-prec+1 would imply emax < emin. */
1655 *status |= MPD_Subnormal;
1656 }
1657 }
1658 else if (adjexp < ctx->emin) {
1659
1660 etiny = mpd_etiny(ctx);
1661
1662 if (mpd_iszerocoeff(dec)) {
1663 if (dec->exp < etiny) {
1664 dec->exp = etiny;
1665 mpd_zerocoeff(dec);
1666 *status |= MPD_Clamped;
1667 }
1668 return;
1669 }
1670
1671 *status |= MPD_Subnormal;
1672 if (dec->exp < etiny) {
1673 /* At this point adjexp=exp+digits-1 < emin and exp < etiny=emin-prec+1:
1674 * (1) shift = emin-prec+1 - exp > 0
1675 * (2) digits-shift = exp+digits-1 - emin + prec < prec */
1676 shift = etiny - dec->exp;
1677 rnd = (int)mpd_qshiftr_inplace(dec, shift);
1678 dec->exp = etiny;
1679 /* We always have a spare digit in case of an increment. */
1680 _mpd_apply_round_excess(dec, rnd, ctx, status);
1681 *status |= MPD_Rounded;
1682 if (rnd) {
1683 *status |= (MPD_Inexact|MPD_Underflow);
1684 if (mpd_iszerocoeff(dec)) {
1685 mpd_zerocoeff(dec);
1686 *status |= MPD_Clamped;
1687 }
1688 }
1689 }
1690 /* Case exp >= etiny=emin-prec+1:
1691 * (1) adjexp=exp+digits-1 < emin
1692 * (2) digits < emin-exp+1 <= prec */
1693 }
1694}
1695
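/*
 * Example (illustrative): with prec == 3, emax == 99 and
 * MPD_ROUND_HALF_EVEN, a finite result whose adjusted exponent exceeds 99
 * (for instance coefficient 100 with exp == 98) is replaced by an infinity
 * of the same sign, and Overflow, Inexact and Rounded are set.
 */
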
1696/* Transcendental functions do not always set Underflow reliably,
1697 * since they only use as much precision as is necessary for correct
1698 * rounding. If a result like 1.0000000000e-101 is finalized, there
1699 * is no rounding digit that would trigger Underflow. But we can
1700 * assume Inexact, so a short check suffices. */
1701static inline void
1702mpd_check_underflow(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
1703{
1704 if (mpd_adjexp(dec) < ctx->emin && !mpd_iszero(dec) &&
1705 dec->exp < mpd_etiny(ctx)) {
1706 *status |= MPD_Underflow;
1707 }
1708}
1709
1710/* Check if a normal number must be rounded after the exponent has been checked. */
1711static inline void
1712_mpd_check_round(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
1713{
1714 mpd_uint_t rnd;
1715 mpd_ssize_t shift;
1716
1717 /* must handle specials: _mpd_check_exp() can produce infinities or NaNs */
1718 if (mpd_isspecial(dec)) {
1719 return;
1720 }
1721
1722 if (dec->digits > ctx->prec) {
1723 shift = dec->digits - ctx->prec;
1724 rnd = mpd_qshiftr_inplace(dec, shift);
1725 dec->exp += shift;
1726 _mpd_apply_round(dec, rnd, ctx, status);
1727 *status |= MPD_Rounded;
1728 if (rnd) {
1729 *status |= MPD_Inexact;
1730 }
1731 }
1732}
1733
1734/* Finalize all operations. */
1735void
1736mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
1737{
1738 if (mpd_isspecial(result)) {
1739 if (mpd_isnan(result)) {
1740 _mpd_fix_nan(result, ctx);
1741 }
1742 return;
1743 }
1744
1745 _mpd_check_exp(result, ctx, status);
1746 _mpd_check_round(result, ctx, status);
1747}
1748
1749
1750/******************************************************************************/
1751/* Copying */
1752/******************************************************************************/
1753
1754/* Internal function: Copy a decimal, share data with src: USE WITH CARE! */
1755static inline void
1756_mpd_copy_shared(mpd_t *dest, const mpd_t *src)
1757{
1758 dest->flags = src->flags;
1759 dest->exp = src->exp;
1760 dest->digits = src->digits;
1761 dest->len = src->len;
1762 dest->alloc = src->alloc;
1763 dest->data = src->data;
1764
1765 mpd_set_shared_data(dest);
1766}
1767
1768/*
1769 * Copy a decimal. In case of an error, status is set to MPD_Malloc_error.
1770 */
1771int
1772mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status)
1773{
1774 if (result == a) return 1;
1775
1776 if (!mpd_qresize(result, a->len, status)) {
1777 return 0;
1778 }
1779
1780 mpd_copy_flags(result, a);
1781 result->exp = a->exp;
1782 result->digits = a->digits;
1783 result->len = a->len;
1784 memcpy(result->data, a->data, a->len * (sizeof *result->data));
1785
1786 return 1;
1787}
1788
1789/*
1790 * Copy to a decimal with a static buffer. The caller has to make sure that
1791 * the buffer is big enough. Cannot fail.
1792 */
1793static void
1794mpd_qcopy_static(mpd_t *result, const mpd_t *a)
1795{
1796 if (result == a) return;
1797
1798 memcpy(result->data, a->data, a->len * (sizeof *result->data));
1799
1800 mpd_copy_flags(result, a);
1801 result->exp = a->exp;
1802 result->digits = a->digits;
1803 result->len = a->len;
1804}
1805
1806/*
1807 * Return a newly allocated copy of the operand. In case of an error,
1808 * status is set to MPD_Malloc_error and the return value is NULL.
1809 */
1810mpd_t *
1811mpd_qncopy(const mpd_t *a)
1812{
1813 mpd_t *result;
1814
1815 if ((result = mpd_qnew_size(a->len)) == NULL) {
1816 return NULL;
1817 }
1818 memcpy(result->data, a->data, a->len * (sizeof *result->data));
1819 mpd_copy_flags(result, a);
1820 result->exp = a->exp;
1821 result->digits = a->digits;
1822 result->len = a->len;
1823
1824 return result;
1825}
1826
1827/*
1828 * Copy a decimal and set the sign to positive. In case of an error, the
1829 * status is set to MPD_Malloc_error.
1830 */
1831int
1832mpd_qcopy_abs(mpd_t *result, const mpd_t *a, uint32_t *status)
1833{
1834 if (!mpd_qcopy(result, a, status)) {
1835 return 0;
1836 }
1837 mpd_set_positive(result);
1838 return 1;
1839}
1840
1841/*
1842 * Copy a decimal and negate the sign. In case of an error, the
1843 * status is set to MPD_Malloc_error.
1844 */
1845int
1846mpd_qcopy_negate(mpd_t *result, const mpd_t *a, uint32_t *status)
1847{
1848 if (!mpd_qcopy(result, a, status)) {
1849 return 0;
1850 }
1851 _mpd_negate(result);
1852 return 1;
1853}
1854
1855/*
1856 * Copy a decimal, setting the sign of the first operand to the sign of the
1857 * second operand. In case of an error, the status is set to MPD_Malloc_error.
1858 */
1859int
1860mpd_qcopy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
1861{
1862 uint8_t sign_b = mpd_sign(b); /* result may equal b! */
1863
1864 if (!mpd_qcopy(result, a, status)) {
1865 return 0;
1866 }
1867 mpd_set_sign(result, sign_b);
1868 return 1;
1869}
1870
1871
1872/******************************************************************************/
1873/* Comparisons */
1874/******************************************************************************/
1875
1876/*
1877 * For all functions that compare two operands and return an int the usual
1878 * convention applies to the return value:
1879 *
1880 * -1 if op1 < op2
1881 * 0 if op1 == op2
1882 * 1 if op1 > op2
1883 *
1884 * INT_MAX for error
1885 */
1886
1887
1888/* Convenience macro. If a and b are not equal, return from the calling
1889 * function with the correct comparison value. */
1890#define CMP_EQUAL_OR_RETURN(a, b) \
1891 if (a != b) { \
1892 if (a < b) { \
1893 return -1; \
1894 } \
1895 return 1; \
1896 }
1897
1898/*
1899 * Compare the data of big and small. This function does the equivalent
1900 * of first shifting small to the left and then comparing the data of
1901 * big and small, except that no allocation for the left shift is needed.
1902 */
1903static int
1904_mpd_basecmp(mpd_uint_t *big, mpd_uint_t *small, mpd_size_t n, mpd_size_t m,
1905 mpd_size_t shift)
1906{
1907#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
1908 /* spurious uninitialized warnings */
1909 mpd_uint_t l=l, lprev=lprev, h=h;
1910#else
1911 mpd_uint_t l, lprev, h;
1912#endif
1913 mpd_uint_t q, r;
1914 mpd_uint_t ph, x;
1915
1916 assert(m > 0 && n >= m && shift > 0);
1917
1918 _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);
1919
1920 if (r != 0) {
1921
1922 ph = mpd_pow10[r];
1923
1924 --m; --n;
1925 _mpd_divmod_pow10(&h, &lprev, small[m--], MPD_RDIGITS-r);
1926 if (h != 0) {
1927 CMP_EQUAL_OR_RETURN(big[n], h)
1928 --n;
1929 }
1930 for (; m != MPD_SIZE_MAX; m--,n--) {
1931 _mpd_divmod_pow10(&h, &l, small[m], MPD_RDIGITS-r);
1932 x = ph * lprev + h;
1933 CMP_EQUAL_OR_RETURN(big[n], x)
1934 lprev = l;
1935 }
1936 x = ph * lprev;
1937 CMP_EQUAL_OR_RETURN(big[q], x)
1938 }
1939 else {
1940 while (--m != MPD_SIZE_MAX) {
1941 CMP_EQUAL_OR_RETURN(big[m+q], small[m])
1942 }
1943 }
1944
1945 return !_mpd_isallzero(big, q);
1946}
1947
1948/* Compare two decimals with the same adjusted exponent. */
1949static int
1950_mpd_cmp_same_adjexp(const mpd_t *a, const mpd_t *b)
1951{
1952 mpd_ssize_t shift, i;
1953
1954 if (a->exp != b->exp) {
1955 /* Cannot wrap: a->exp + a->digits = b->exp + b->digits, so
1956 * a->exp - b->exp = b->digits - a->digits. */
1957 shift = a->exp - b->exp;
1958 if (shift > 0) {
1959 return -1 * _mpd_basecmp(b->data, a->data, b->len, a->len, shift);
1960 }
1961 else {
1962 return _mpd_basecmp(a->data, b->data, a->len, b->len, -shift);
1963 }
1964 }
1965
1966 /*
1967 * At this point adjexp(a) == adjexp(b) and a->exp == b->exp,
1968 * so a->digits == b->digits, therefore a->len == b->len.
1969 */
1970 for (i = a->len-1; i >= 0; --i) {
1971 CMP_EQUAL_OR_RETURN(a->data[i], b->data[i])
1972 }
1973
1974 return 0;
1975}
1976
1977/* Compare two numerical values. */
1978static int
1979_mpd_cmp(const mpd_t *a, const mpd_t *b)
1980{
1981 mpd_ssize_t adjexp_a, adjexp_b;
1982
1983 /* equal pointers */
1984 if (a == b) {
1985 return 0;
1986 }
1987
1988 /* infinities */
1989 if (mpd_isinfinite(a)) {
1990 if (mpd_isinfinite(b)) {
1991 return mpd_isnegative(b) - mpd_isnegative(a);
1992 }
1993 return mpd_arith_sign(a);
1994 }
1995 if (mpd_isinfinite(b)) {
1996 return -mpd_arith_sign(b);
1997 }
1998
1999 /* zeros */
2000 if (mpd_iszerocoeff(a)) {
2001 if (mpd_iszerocoeff(b)) {
2002 return 0;
2003 }
2004 return -mpd_arith_sign(b);
2005 }
2006 if (mpd_iszerocoeff(b)) {
2007 return mpd_arith_sign(a);
2008 }
2009
2010 /* different signs */
2011 if (mpd_sign(a) != mpd_sign(b)) {
2012 return mpd_sign(b) - mpd_sign(a);
2013 }
2014
2015 /* different adjusted exponents */
2016 adjexp_a = mpd_adjexp(a);
2017 adjexp_b = mpd_adjexp(b);
2018 if (adjexp_a != adjexp_b) {
2019 if (adjexp_a < adjexp_b) {
2020 return -1 * mpd_arith_sign(a);
2021 }
2022 return mpd_arith_sign(a);
2023 }
2024
2025 /* same adjusted exponents */
2026 return _mpd_cmp_same_adjexp(a, b) * mpd_arith_sign(a);
2027}
2028
2029/* Compare the absolutes of two numerical values. */
2030static int
2031_mpd_cmp_abs(const mpd_t *a, const mpd_t *b)
2032{
2033 mpd_ssize_t adjexp_a, adjexp_b;
2034
2035 /* equal pointers */
2036 if (a == b) {
2037 return 0;
2038 }
2039
2040 /* infinities */
2041 if (mpd_isinfinite(a)) {
2042 if (mpd_isinfinite(b)) {
2043 return 0;
2044 }
2045 return 1;
2046 }
2047 if (mpd_isinfinite(b)) {
2048 return -1;
2049 }
2050
2051 /* zeros */
2052 if (mpd_iszerocoeff(a)) {
2053 if (mpd_iszerocoeff(b)) {
2054 return 0;
2055 }
2056 return -1;
2057 }
2058 if (mpd_iszerocoeff(b)) {
2059 return 1;
2060 }
2061
2062 /* different adjusted exponents */
2063 adjexp_a = mpd_adjexp(a);
2064 adjexp_b = mpd_adjexp(b);
2065 if (adjexp_a != adjexp_b) {
2066 if (adjexp_a < adjexp_b) {
2067 return -1;
2068 }
2069 return 1;
2070 }
2071
2072 /* same adjusted exponents */
2073 return _mpd_cmp_same_adjexp(a, b);
2074}
2075
2076/* Compare two values and return an integer result. */
2077int
2078mpd_qcmp(const mpd_t *a, const mpd_t *b, uint32_t *status)
2079{
2080 if (mpd_isspecial(a) || mpd_isspecial(b)) {
2081 if (mpd_isnan(a) || mpd_isnan(b)) {
2082 *status |= MPD_Invalid_operation;
2083 return INT_MAX;
2084 }
2085 }
2086
2087 return _mpd_cmp(a, b);
2088}
2089
2090/*
2091 * Compare a and b, convert the usual integer result to a decimal and
2092 * store it in 'result'. For convenience, the integer result of the comparison
2093 * is returned. Comparisons involving NaNs return NaN/INT_MAX.
2094 */
2095int
2096mpd_qcompare(mpd_t *result, const mpd_t *a, const mpd_t *b,
2097 const mpd_context_t *ctx, uint32_t *status)
2098{
2099 int c;
2100
2101 if (mpd_isspecial(a) || mpd_isspecial(b)) {
2102 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
2103 return INT_MAX;
2104 }
2105 }
2106
2107 c = _mpd_cmp(a, b);
2108 _settriple(result, (c < 0), (c != 0), 0);
2109 return c;
2110}
2111
2112/* Same as mpd_compare(), but signal for all NaNs, i.e. also for quiet NaNs. */
2113int
2114mpd_qcompare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b,
2115 const mpd_context_t *ctx, uint32_t *status)
2116{
2117 int c;
2118
2119 if (mpd_isspecial(a) || mpd_isspecial(b)) {
2120 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
2121 *status |= MPD_Invalid_operation;
2122 return INT_MAX;
2123 }
2124 }
2125
2126 c = _mpd_cmp(a, b);
2127 _settriple(result, (c < 0), (c != 0), 0);
2128 return c;
2129}
2130
2131/* Compare the operands using a total order. */
2132int
2133mpd_cmp_total(const mpd_t *a, const mpd_t *b)
2134{
2135 mpd_t aa, bb;
2136 int nan_a, nan_b;
2137 int c;
2138
2139 if (mpd_sign(a) != mpd_sign(b)) {
2140 return mpd_sign(b) - mpd_sign(a);
2141 }
2142
2143
2144 if (mpd_isnan(a)) {
2145 c = 1;
2146 if (mpd_isnan(b)) {
2147 nan_a = (mpd_isqnan(a)) ? 1 : 0;
2148 nan_b = (mpd_isqnan(b)) ? 1 : 0;
2149 if (nan_b == nan_a) {
2150 if (a->len > 0 && b->len > 0) {
2151 _mpd_copy_shared(&aa, a);
2152 _mpd_copy_shared(&bb, b);
2153 aa.exp = bb.exp = 0;
2154 /* compare payload */
2155 c = _mpd_cmp_abs(&aa, &bb);
2156 }
2157 else {
2158 c = (a->len > 0) - (b->len > 0);
2159 }
2160 }
2161 else {
2162 c = nan_a - nan_b;
2163 }
2164 }
2165 }
2166 else if (mpd_isnan(b)) {
2167 c = -1;
2168 }
2169 else {
2170 c = _mpd_cmp_abs(a, b);
2171 if (c == 0 && a->exp != b->exp) {
2172 c = (a->exp < b->exp) ? -1 : 1;
2173 }
2174 }
2175
2176 return c * mpd_arith_sign(a);
2177}
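
/*
 * Illustrative sketch (not part of the library): the total order
 * distinguishes representations that are numerically equal. 1.0 [10E-1]
 * and 1.00 [100E-2] compare equal for mpd_qcmp(), but 1.00 sorts before
 * 1.0 because its exponent is smaller.
 */
static void
example_cmp_total(void)
{
    MPD_NEW_STATIC(x,0,0,0,0);
    MPD_NEW_STATIC(y,0,0,0,0);
    uint32_t status = 0;

    _settriple(&x, MPD_POS, 10, -1);   /* 1.0  */
    _settriple(&y, MPD_POS, 100, -2);  /* 1.00 */

    (void)mpd_qcmp(&x, &y, &status);   /* 0: numerically equal */
    (void)mpd_cmp_total(&x, &y);       /* 1: x follows y in the total order */

    mpd_del(&x);
    mpd_del(&y);
}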
2178
2179/*
2180 * Compare a and b according to a total order, convert the usual integer result
2181 * to a decimal and store it in 'result'. For convenience, the integer result
2182 * of the comparison is returned.
2183 */
2184int
2185mpd_compare_total(mpd_t *result, const mpd_t *a, const mpd_t *b)
2186{
2187 int c;
2188
2189 c = mpd_cmp_total(a, b);
2190 _settriple(result, (c < 0), (c != 0), 0);
2191 return c;
2192}
2193
2194/* Compare the magnitude of the operands using a total order. */
2195int
2196mpd_cmp_total_mag(const mpd_t *a, const mpd_t *b)
2197{
2198 mpd_t aa, bb;
2199
2200 _mpd_copy_shared(&aa, a);
2201 _mpd_copy_shared(&bb, b);
2202
2203 mpd_set_positive(&aa);
2204 mpd_set_positive(&bb);
2205
2206 return mpd_cmp_total(&aa, &bb);
2207}
2208
2209/*
2210 * Compare the magnitude of a and b according to a total order, convert the
2211 * usual integer result to a decimal and store it in 'result'.
2212 * For convenience, the integer result of the comparison is returned.
2213 */
2214int
2215mpd_compare_total_mag(mpd_t *result, const mpd_t *a, const mpd_t *b)
2216{
2217 int c;
2218
2219 c = mpd_cmp_total_mag(a, b);
2220 _settriple(result, (c < 0), (c != 0), 0);
2221 return c;
2222}
2223
2224/* Determine an ordering for operands that are numerically equal. */
2225static inline int
2226_mpd_cmp_numequal(const mpd_t *a, const mpd_t *b)
2227{
2228 int sign_a, sign_b;
2229 int c;
2230
2231 sign_a = mpd_sign(a);
2232 sign_b = mpd_sign(b);
2233 if (sign_a != sign_b) {
2234 c = sign_b - sign_a;
2235 }
2236 else {
2237 c = (a->exp < b->exp) ? -1 : 1;
2238 c *= mpd_arith_sign(a);
2239 }
2240
2241 return c;
2242}
2243
2244
2245/******************************************************************************/
2246/* Shifting the coefficient */
2247/******************************************************************************/
2248
2249/*
2250 * Shift the coefficient of the operand to the left, no check for specials.
2251 * Both operands may be the same pointer. If the result length has to be
2252 * increased, mpd_qresize() might fail with MPD_Malloc_error.
2253 */
2254int
2255mpd_qshiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
2256{
2257 mpd_ssize_t size;
2258
Stefan Krahdc36efa2012-04-07 15:57:59 +02002259 assert(!mpd_isspecial(a));
Stefan Krah1919b7e2012-03-21 18:25:23 +01002260 assert(n >= 0);
2261
2262 if (mpd_iszerocoeff(a) || n == 0) {
2263 return mpd_qcopy(result, a, status);
2264 }
2265
2266 size = mpd_digits_to_size(a->digits+n);
2267 if (!mpd_qresize(result, size, status)) {
2268 return 0; /* result is NaN */
2269 }
2270
2271 _mpd_baseshiftl(result->data, a->data, size, a->len, n);
2272
2273 mpd_copy_flags(result, a);
Stefan Krah1919b7e2012-03-21 18:25:23 +01002274 result->exp = a->exp;
2275 result->digits = a->digits+n;
Stefan Krahdc36efa2012-04-07 15:57:59 +02002276 result->len = size;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002277
2278 return 1;
2279}
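
/*
 * Illustrative sketch (not part of the library): mpd_qshiftl() multiplies
 * the coefficient by 10**n and leaves the exponent untouched. Shifting
 * 123 left by two digits yields the coefficient 12300 with the same
 * exponent.
 */
static void
example_shiftl(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    uint32_t status = 0;

    _settriple(&a, MPD_POS, 123, 0);
    if (mpd_qshiftl(&r, &a, 2, &status)) {
        /* r: coefficient 12300, digits == 5, exp == 0 */
    }

    mpd_del(&a);
    mpd_del(&r);
}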
2280
2281/* Determine the rounding indicator if all digits of the coefficient are shifted
2282 * out. */
2283static mpd_uint_t
2284_mpd_get_rnd(const mpd_uint_t *data, mpd_ssize_t len, int use_msd)
2285{
2286 mpd_uint_t rnd = 0, rest = 0, word;
2287
2288 word = data[len-1];
2289 /* special treatment for the most significant digit if shift == digits */
2290 if (use_msd) {
2291 _mpd_divmod_pow10(&rnd, &rest, word, mpd_word_digits(word)-1);
2292 if (len > 1 && rest == 0) {
2293 rest = !_mpd_isallzero(data, len-1);
2294 }
2295 }
2296 else {
2297 rest = !_mpd_isallzero(data, len);
2298 }
2299
2300 return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd;
2301}
2302
2303/*
Stefan Krahdc36efa2012-04-07 15:57:59 +02002304 * Same as mpd_qshiftr(), but 'result' is an mpd_t with a static coefficient.
2305 * It is the caller's responsibility to ensure that the coefficient is big
2306 * enough. The function cannot fail.
Stefan Krah1919b7e2012-03-21 18:25:23 +01002307 */
2308mpd_uint_t
2309mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n)
2310{
2311 mpd_uint_t rnd;
2312 mpd_ssize_t size;
2313
Stefan Krahdc36efa2012-04-07 15:57:59 +02002314 assert(!mpd_isspecial(a));
Stefan Krah1919b7e2012-03-21 18:25:23 +01002315 assert(n >= 0);
2316
2317 if (mpd_iszerocoeff(a) || n == 0) {
2318 mpd_qcopy_static(result, a);
2319 return 0;
2320 }
2321
2322 if (n >= a->digits) {
2323 rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
2324 mpd_zerocoeff(result);
Stefan Krah1919b7e2012-03-21 18:25:23 +01002325 }
2326 else {
2327 result->digits = a->digits-n;
2328 size = mpd_digits_to_size(result->digits);
2329 rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
Stefan Krahdc36efa2012-04-07 15:57:59 +02002330 result->len = size;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002331 }
2332
2333 mpd_copy_flags(result, a);
2334 result->exp = a->exp;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002335
2336 return rnd;
2337}
2338
2339/*
2340 * Inplace shift of the coefficient to the right, no check for specials.
2341 * Returns the rounding indicator for mpd_rnd_incr().
2342 * The function cannot fail.
2343 */
2344mpd_uint_t
2345mpd_qshiftr_inplace(mpd_t *result, mpd_ssize_t n)
2346{
2347 uint32_t dummy;
2348 mpd_uint_t rnd;
2349 mpd_ssize_t size;
2350
Stefan Krahdc36efa2012-04-07 15:57:59 +02002351 assert(!mpd_isspecial(result));
Stefan Krah1919b7e2012-03-21 18:25:23 +01002352 assert(n >= 0);
2353
2354 if (mpd_iszerocoeff(result) || n == 0) {
2355 return 0;
2356 }
2357
2358 if (n >= result->digits) {
2359 rnd = _mpd_get_rnd(result->data, result->len, (n==result->digits));
2360 mpd_zerocoeff(result);
Stefan Krah1919b7e2012-03-21 18:25:23 +01002361 }
2362 else {
2363 rnd = _mpd_baseshiftr(result->data, result->data, result->len, n);
2364 result->digits -= n;
2365 size = mpd_digits_to_size(result->digits);
2366 /* reducing the size cannot fail */
2367 mpd_qresize(result, size, &dummy);
Stefan Krahdc36efa2012-04-07 15:57:59 +02002368 result->len = size;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002369 }
2370
Stefan Krah1919b7e2012-03-21 18:25:23 +01002371 return rnd;
2372}
2373
2374/*
2375 * Shift the coefficient of the operand to the right, no check for specials.
2376 * Both operands may be the same pointer. Returns the rounding indicator to
2377 * be used by mpd_rnd_incr(). If the result length has to be increased,
2378 * mpd_qcopy() or mpd_qresize() might fail with MPD_Malloc_error. In those
2379 * cases, MPD_UINT_MAX is returned.
2380 */
2381mpd_uint_t
2382mpd_qshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
2383{
2384 mpd_uint_t rnd;
2385 mpd_ssize_t size;
2386
Stefan Krahdc36efa2012-04-07 15:57:59 +02002387 assert(!mpd_isspecial(a));
Stefan Krah1919b7e2012-03-21 18:25:23 +01002388 assert(n >= 0);
2389
2390 if (mpd_iszerocoeff(a) || n == 0) {
2391 if (!mpd_qcopy(result, a, status)) {
2392 return MPD_UINT_MAX;
2393 }
2394 return 0;
2395 }
2396
2397 if (n >= a->digits) {
2398 rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
2399 mpd_zerocoeff(result);
Stefan Krah1919b7e2012-03-21 18:25:23 +01002400 }
2401 else {
2402 result->digits = a->digits-n;
2403 size = mpd_digits_to_size(result->digits);
2404 if (result == a) {
2405 rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
2406 /* reducing the size cannot fail */
2407 mpd_qresize(result, size, status);
2408 }
2409 else {
2410 if (!mpd_qresize(result, size, status)) {
2411 return MPD_UINT_MAX;
2412 }
2413 rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
2414 }
Stefan Krahdc36efa2012-04-07 15:57:59 +02002415 result->len = size;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002416 }
2417
2418 mpd_copy_flags(result, a);
2419 result->exp = a->exp;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002420
2421 return rnd;
2422}
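
/*
 * Illustrative sketch (not part of the library): shifting 12345 right by
 * two digits leaves the coefficient 123; the return value summarizes the
 * discarded digits "45" as a rounding indicator for mpd_rnd_incr().
 * MPD_UINT_MAX signals an allocation failure and must be checked before
 * the indicator is used.
 */
static void
example_shiftr(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_uint_t rnd;
    uint32_t status = 0;

    _settriple(&a, MPD_POS, 12345, 0);
    rnd = mpd_qshiftr(&r, &a, 2, &status);
    if (rnd != MPD_UINT_MAX) {
        /* r: coefficient 123, exponent unchanged */
    }

    mpd_del(&a);
    mpd_del(&r);
}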
2423
2424
2425/******************************************************************************/
2426/* Miscellaneous operations */
2427/******************************************************************************/
2428
2429/* Logical And */
2430void
2431mpd_qand(mpd_t *result, const mpd_t *a, const mpd_t *b,
2432 const mpd_context_t *ctx, uint32_t *status)
2433{
2434 const mpd_t *big = a, *small = b;
2435 mpd_uint_t x, y, z, xbit, ybit;
2436 int k, mswdigits;
2437 mpd_ssize_t i;
2438
2439 if (mpd_isspecial(a) || mpd_isspecial(b) ||
2440 mpd_isnegative(a) || mpd_isnegative(b) ||
2441 a->exp != 0 || b->exp != 0) {
2442 mpd_seterror(result, MPD_Invalid_operation, status);
2443 return;
2444 }
2445 if (b->digits > a->digits) {
2446 big = b;
2447 small = a;
2448 }
2449 if (!mpd_qresize(result, big->len, status)) {
2450 return;
2451 }
2452
2453
2454 /* full words */
2455 for (i = 0; i < small->len-1; i++) {
2456 x = small->data[i];
2457 y = big->data[i];
2458 z = 0;
2459 for (k = 0; k < MPD_RDIGITS; k++) {
2460 xbit = x % 10;
2461 x /= 10;
2462 ybit = y % 10;
2463 y /= 10;
2464 if (xbit > 1 || ybit > 1) {
2465 goto invalid_operation;
2466 }
2467 z += (xbit&ybit) ? mpd_pow10[k] : 0;
2468 }
2469 result->data[i] = z;
2470 }
2471 /* most significant word of small */
2472 x = small->data[i];
2473 y = big->data[i];
2474 z = 0;
2475 mswdigits = mpd_word_digits(x);
2476 for (k = 0; k < mswdigits; k++) {
2477 xbit = x % 10;
2478 x /= 10;
2479 ybit = y % 10;
2480 y /= 10;
2481 if (xbit > 1 || ybit > 1) {
2482 goto invalid_operation;
2483 }
2484 z += (xbit&ybit) ? mpd_pow10[k] : 0;
2485 }
2486 result->data[i++] = z;
2487
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002488 /* scan the rest of y for digits > 1 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01002489 for (; k < MPD_RDIGITS; k++) {
2490 ybit = y % 10;
2491 y /= 10;
2492 if (ybit > 1) {
2493 goto invalid_operation;
2494 }
2495 }
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002496 /* scan the rest of big for digits > 1 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01002497 for (; i < big->len; i++) {
2498 y = big->data[i];
2499 for (k = 0; k < MPD_RDIGITS; k++) {
2500 ybit = y % 10;
2501 y /= 10;
2502 if (ybit > 1) {
2503 goto invalid_operation;
2504 }
2505 }
2506 }
2507
2508 mpd_clear_flags(result);
2509 result->exp = 0;
2510 result->len = _mpd_real_size(result->data, small->len);
2511 mpd_qresize(result, result->len, status);
2512 mpd_setdigits(result);
2513 _mpd_cap(result, ctx);
2514 return;
2515
2516invalid_operation:
2517 mpd_seterror(result, MPD_Invalid_operation, status);
2518}
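
/*
 * Illustrative sketch (not part of the library): the logical operations
 * interpret their operands as strings of 0s and 1s. 1101 AND 0110 is
 * 0100, i.e. the coefficient 100.
 */
static void
example_and(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(b,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t maxcontext;
    uint32_t status = 0;

    mpd_maxcontext(&maxcontext);
    _settriple(&a, MPD_POS, 1101, 0);
    _settriple(&b, MPD_POS, 110, 0);
    mpd_qand(&r, &a, &b, &maxcontext, &status);  /* r == 100 */

    mpd_del(&a);
    mpd_del(&b);
    mpd_del(&r);
}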
2519
2520/* Class of an operand. Returns a pointer to the constant name. */
2521const char *
2522mpd_class(const mpd_t *a, const mpd_context_t *ctx)
2523{
2524 if (mpd_isnan(a)) {
2525 if (mpd_isqnan(a))
2526 return "NaN";
2527 else
2528 return "sNaN";
2529 }
2530 else if (mpd_ispositive(a)) {
2531 if (mpd_isinfinite(a))
2532 return "+Infinity";
2533 else if (mpd_iszero(a))
2534 return "+Zero";
2535 else if (mpd_isnormal(a, ctx))
2536 return "+Normal";
2537 else
2538 return "+Subnormal";
2539 }
2540 else {
2541 if (mpd_isinfinite(a))
2542 return "-Infinity";
2543 else if (mpd_iszero(a))
2544 return "-Zero";
2545 else if (mpd_isnormal(a, ctx))
2546 return "-Normal";
2547 else
2548 return "-Subnormal";
2549 }
2550}
2551
2552/* Logical Not */
2553void
2554mpd_qinvert(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
2555 uint32_t *status)
2556{
2557 mpd_uint_t x, z, xbit;
2558 mpd_ssize_t i, digits, len;
2559 mpd_ssize_t q, r;
2560 int k;
2561
2562 if (mpd_isspecial(a) || mpd_isnegative(a) || a->exp != 0) {
2563 mpd_seterror(result, MPD_Invalid_operation, status);
2564 return;
2565 }
2566
2567 digits = (a->digits < ctx->prec) ? ctx->prec : a->digits;
2568 _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
2569 len = (r == 0) ? q : q+1;
2570 if (!mpd_qresize(result, len, status)) {
2571 return;
2572 }
2573
2574 for (i = 0; i < len; i++) {
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002575 x = (i < a->len) ? a->data[i] : 0;
Stefan Krah1919b7e2012-03-21 18:25:23 +01002576 z = 0;
2577 for (k = 0; k < MPD_RDIGITS; k++) {
2578 xbit = x % 10;
2579 x /= 10;
2580 if (xbit > 1) {
2581 goto invalid_operation;
2582 }
2583 z += !xbit ? mpd_pow10[k] : 0;
2584 }
2585 result->data[i] = z;
2586 }
2587
2588 mpd_clear_flags(result);
2589 result->exp = 0;
2590 result->len = _mpd_real_size(result->data, len);
2591 mpd_qresize(result, result->len, status);
2592 mpd_setdigits(result);
2593 _mpd_cap(result, ctx);
2594 return;
2595
2596invalid_operation:
2597 mpd_seterror(result, MPD_Invalid_operation, status);
2598}
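
/*
 * Illustrative sketch (not part of the library): invert flips every digit
 * of the operand, padded with zeros up to the context precision. With a
 * precision of 5, inverting 101 yields 11010.
 */
static void
example_invert(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t ctx;
    uint32_t status = 0;

    mpd_maxcontext(&ctx);
    ctx.prec = 5;
    _settriple(&a, MPD_POS, 101, 0);
    mpd_qinvert(&r, &a, &ctx, &status);  /* r == 11010 */

    mpd_del(&a);
    mpd_del(&r);
}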
2599
2600/* Exponent of the magnitude of the most significant digit of the operand. */
2601void
2602mpd_qlogb(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
2603 uint32_t *status)
2604{
2605 if (mpd_isspecial(a)) {
2606 if (mpd_qcheck_nan(result, a, ctx, status)) {
2607 return;
2608 }
2609 mpd_setspecial(result, MPD_POS, MPD_INF);
2610 }
2611 else if (mpd_iszerocoeff(a)) {
2612 mpd_setspecial(result, MPD_NEG, MPD_INF);
2613 *status |= MPD_Division_by_zero;
2614 }
2615 else {
2616 mpd_qset_ssize(result, mpd_adjexp(a), ctx, status);
2617 }
2618}
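
/*
 * Illustrative sketch (not part of the library): for a finite non-zero
 * operand, logb is simply the adjusted exponent. For 250 (coefficient
 * 250, exponent 0) the adjusted exponent is 2, so the result is the
 * decimal 2.
 */
static void
example_logb(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t maxcontext;
    uint32_t status = 0;

    mpd_maxcontext(&maxcontext);
    _settriple(&a, MPD_POS, 250, 0);
    mpd_qlogb(&r, &a, &maxcontext, &status);  /* r == 2 */

    mpd_del(&a);
    mpd_del(&r);
}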
2619
2620/* Logical Or */
2621void
2622mpd_qor(mpd_t *result, const mpd_t *a, const mpd_t *b,
2623 const mpd_context_t *ctx, uint32_t *status)
2624{
2625 const mpd_t *big = a, *small = b;
2626 mpd_uint_t x, y, z, xbit, ybit;
2627 int k, mswdigits;
2628 mpd_ssize_t i;
2629
2630 if (mpd_isspecial(a) || mpd_isspecial(b) ||
2631 mpd_isnegative(a) || mpd_isnegative(b) ||
2632 a->exp != 0 || b->exp != 0) {
2633 mpd_seterror(result, MPD_Invalid_operation, status);
2634 return;
2635 }
2636 if (b->digits > a->digits) {
2637 big = b;
2638 small = a;
2639 }
2640 if (!mpd_qresize(result, big->len, status)) {
2641 return;
2642 }
2643
2644
2645 /* full words */
2646 for (i = 0; i < small->len-1; i++) {
2647 x = small->data[i];
2648 y = big->data[i];
2649 z = 0;
2650 for (k = 0; k < MPD_RDIGITS; k++) {
2651 xbit = x % 10;
2652 x /= 10;
2653 ybit = y % 10;
2654 y /= 10;
2655 if (xbit > 1 || ybit > 1) {
2656 goto invalid_operation;
2657 }
2658 z += (xbit|ybit) ? mpd_pow10[k] : 0;
2659 }
2660 result->data[i] = z;
2661 }
2662 /* most significant word of small */
2663 x = small->data[i];
2664 y = big->data[i];
2665 z = 0;
2666 mswdigits = mpd_word_digits(x);
2667 for (k = 0; k < mswdigits; k++) {
2668 xbit = x % 10;
2669 x /= 10;
2670 ybit = y % 10;
2671 y /= 10;
2672 if (xbit > 1 || ybit > 1) {
2673 goto invalid_operation;
2674 }
2675 z += (xbit|ybit) ? mpd_pow10[k] : 0;
2676 }
2677
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002678 /* scan for digits > 1 and copy the rest of y */
Stefan Krah1919b7e2012-03-21 18:25:23 +01002679 for (; k < MPD_RDIGITS; k++) {
2680 ybit = y % 10;
2681 y /= 10;
2682 if (ybit > 1) {
2683 goto invalid_operation;
2684 }
2685 z += ybit*mpd_pow10[k];
2686 }
2687 result->data[i++] = z;
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002688 /* scan for digits > 1 and copy the rest of big */
Stefan Krah1919b7e2012-03-21 18:25:23 +01002689 for (; i < big->len; i++) {
2690 y = big->data[i];
2691 for (k = 0; k < MPD_RDIGITS; k++) {
2692 ybit = y % 10;
2693 y /= 10;
2694 if (ybit > 1) {
2695 goto invalid_operation;
2696 }
2697 }
2698 result->data[i] = big->data[i];
2699 }
2700
2701 mpd_clear_flags(result);
2702 result->exp = 0;
2703 result->len = _mpd_real_size(result->data, big->len);
2704 mpd_qresize(result, result->len, status);
2705 mpd_setdigits(result);
2706 _mpd_cap(result, ctx);
2707 return;
2708
2709invalid_operation:
2710 mpd_seterror(result, MPD_Invalid_operation, status);
2711}
2712
2713/*
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002714 * Rotate the coefficient of 'a' by 'b' digits. 'b' must be an integer with
Stefan Krah1919b7e2012-03-21 18:25:23 +01002715 * exponent 0.
2716 */
2717void
2718mpd_qrotate(mpd_t *result, const mpd_t *a, const mpd_t *b,
2719 const mpd_context_t *ctx, uint32_t *status)
2720{
2721 uint32_t workstatus = 0;
2722 MPD_NEW_STATIC(tmp,0,0,0,0);
2723 MPD_NEW_STATIC(big,0,0,0,0);
2724 MPD_NEW_STATIC(small,0,0,0,0);
2725 mpd_ssize_t n, lshift, rshift;
2726
2727 if (mpd_isspecial(a) || mpd_isspecial(b)) {
2728 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
2729 return;
2730 }
2731 }
2732 if (b->exp != 0 || mpd_isinfinite(b)) {
2733 mpd_seterror(result, MPD_Invalid_operation, status);
2734 return;
2735 }
2736
2737 n = mpd_qget_ssize(b, &workstatus);
2738 if (workstatus&MPD_Invalid_operation) {
2739 mpd_seterror(result, MPD_Invalid_operation, status);
2740 return;
2741 }
2742 if (n > ctx->prec || n < -ctx->prec) {
2743 mpd_seterror(result, MPD_Invalid_operation, status);
2744 return;
2745 }
2746 if (mpd_isinfinite(a)) {
2747 mpd_qcopy(result, a, status);
2748 return;
2749 }
2750
2751 if (n >= 0) {
2752 lshift = n;
2753 rshift = ctx->prec-n;
2754 }
2755 else {
2756 lshift = ctx->prec+n;
2757 rshift = -n;
2758 }
2759
2760 if (a->digits > ctx->prec) {
2761 if (!mpd_qcopy(&tmp, a, status)) {
2762 mpd_seterror(result, MPD_Malloc_error, status);
2763 goto finish;
2764 }
2765 _mpd_cap(&tmp, ctx);
2766 a = &tmp;
2767 }
2768
2769 if (!mpd_qshiftl(&big, a, lshift, status)) {
2770 mpd_seterror(result, MPD_Malloc_error, status);
2771 goto finish;
2772 }
2773 _mpd_cap(&big, ctx);
2774
2775 if (mpd_qshiftr(&small, a, rshift, status) == MPD_UINT_MAX) {
2776 mpd_seterror(result, MPD_Malloc_error, status);
2777 goto finish;
2778 }
2779 _mpd_qadd(result, &big, &small, ctx, status);
2780
2781
2782finish:
2783 mpd_del(&tmp);
2784 mpd_del(&big);
2785 mpd_del(&small);
2786}
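
/*
 * Illustrative sketch (not part of the library): rotation operates on a
 * field of ctx->prec digits. With a precision of 5, rotating 12345 by 2
 * moves the two most significant digits to the low end: 34512.
 */
static void
example_rotate(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(b,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t ctx;
    uint32_t status = 0;

    mpd_maxcontext(&ctx);
    ctx.prec = 5;
    _settriple(&a, MPD_POS, 12345, 0);
    _settriple(&b, MPD_POS, 2, 0);
    mpd_qrotate(&r, &a, &b, &ctx, &status);  /* r == 34512 */

    mpd_del(&a);
    mpd_del(&b);
    mpd_del(&r);
}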
2787
2788/*
2789 * b must be an integer with exponent 0 and in the range +-2*(emax + prec).
2790 * XXX: In my opinion +-(2*emax + prec) would be more sensible.
2791 * The result is a with the value of b added to its exponent.
2792 */
2793void
2794mpd_qscaleb(mpd_t *result, const mpd_t *a, const mpd_t *b,
2795 const mpd_context_t *ctx, uint32_t *status)
2796{
2797 uint32_t workstatus = 0;
2798 mpd_uint_t n, maxjump;
2799#ifndef LEGACY_COMPILER
2800 int64_t exp;
2801#else
2802 mpd_uint_t x;
2803 int x_sign, n_sign;
2804 mpd_ssize_t exp;
2805#endif
2806
2807 if (mpd_isspecial(a) || mpd_isspecial(b)) {
2808 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
2809 return;
2810 }
2811 }
2812 if (b->exp != 0 || mpd_isinfinite(b)) {
2813 mpd_seterror(result, MPD_Invalid_operation, status);
2814 return;
2815 }
2816
2817 n = mpd_qabs_uint(b, &workstatus);
2818 /* the spec demands this */
2819 maxjump = 2 * (mpd_uint_t)(ctx->emax + ctx->prec);
2820
2821 if (n > maxjump || workstatus&MPD_Invalid_operation) {
2822 mpd_seterror(result, MPD_Invalid_operation, status);
2823 return;
2824 }
2825 if (mpd_isinfinite(a)) {
2826 mpd_qcopy(result, a, status);
2827 return;
2828 }
2829
2830#ifndef LEGACY_COMPILER
2831 exp = a->exp + (int64_t)n * mpd_arith_sign(b);
2832 exp = (exp > MPD_EXP_INF) ? MPD_EXP_INF : exp;
2833 exp = (exp < MPD_EXP_CLAMP) ? MPD_EXP_CLAMP : exp;
2834#else
2835 x = (a->exp < 0) ? -a->exp : a->exp;
2836 x_sign = (a->exp < 0) ? 1 : 0;
2837 n_sign = mpd_isnegative(b) ? 1 : 0;
2838
2839 if (x_sign == n_sign) {
2840 x = x + n;
2841 if (x < n) x = MPD_UINT_MAX;
2842 }
2843 else {
2844 x_sign = (x >= n) ? x_sign : n_sign;
2845 x = (x >= n) ? x - n : n - x;
2846 }
2847 if (!x_sign && x > MPD_EXP_INF) x = MPD_EXP_INF;
2848 if (x_sign && x > -MPD_EXP_CLAMP) x = -MPD_EXP_CLAMP;
2849 exp = x_sign ? -((mpd_ssize_t)x) : (mpd_ssize_t)x;
2850#endif
2851
2852 mpd_qcopy(result, a, status);
2853 result->exp = (mpd_ssize_t)exp;
2854
2855 mpd_qfinalize(result, ctx, status);
2856}
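
/*
 * Illustrative sketch (not part of the library): scaleb adds the integral
 * operand b to the exponent of a. Scaling 7 by 3 gives 7E+3.
 */
static void
example_scaleb(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(b,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t maxcontext;
    uint32_t status = 0;

    mpd_maxcontext(&maxcontext);
    _settriple(&a, MPD_POS, 7, 0);
    _settriple(&b, MPD_POS, 3, 0);
    mpd_qscaleb(&r, &a, &b, &maxcontext, &status);  /* r == 7E+3 */

    mpd_del(&a);
    mpd_del(&b);
    mpd_del(&r);
}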
2857
2858/*
2859 * Shift the coefficient by n digits, positive n is a left shift. In the case
2860 * of a left shift, the result is decapitated to fit the context precision. If
2861 * you don't want that, use mpd_shiftl().
2862 */
2863void
2864mpd_qshiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, const mpd_context_t *ctx,
2865 uint32_t *status)
2866{
2867 if (mpd_isspecial(a)) {
2868 if (mpd_qcheck_nan(result, a, ctx, status)) {
2869 return;
2870 }
2871 mpd_qcopy(result, a, status);
2872 return;
2873 }
2874
2875 if (n >= 0 && n <= ctx->prec) {
2876 mpd_qshiftl(result, a, n, status);
2877 _mpd_cap(result, ctx);
2878 }
2879 else if (n < 0 && n >= -ctx->prec) {
2880 if (!mpd_qcopy(result, a, status)) {
2881 return;
2882 }
2883 _mpd_cap(result, ctx);
2884 mpd_qshiftr_inplace(result, -n);
2885 }
2886 else {
2887 mpd_seterror(result, MPD_Invalid_operation, status);
2888 }
2889}
2890
2891/*
2892 * Same as mpd_shiftn(), but the shift is specified by the decimal b, which
2893 * must be an integer with a zero exponent. Infinities remain infinities.
2894 */
2895void
2896mpd_qshift(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
2897 uint32_t *status)
2898{
2899 uint32_t workstatus = 0;
2900 mpd_ssize_t n;
2901
2902 if (mpd_isspecial(a) || mpd_isspecial(b)) {
2903 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
2904 return;
2905 }
2906 }
2907 if (b->exp != 0 || mpd_isinfinite(b)) {
2908 mpd_seterror(result, MPD_Invalid_operation, status);
2909 return;
2910 }
2911
2912 n = mpd_qget_ssize(b, &workstatus);
2913 if (workstatus&MPD_Invalid_operation) {
2914 mpd_seterror(result, MPD_Invalid_operation, status);
2915 return;
2916 }
2917 if (n > ctx->prec || n < -ctx->prec) {
2918 mpd_seterror(result, MPD_Invalid_operation, status);
2919 return;
2920 }
2921 if (mpd_isinfinite(a)) {
2922 mpd_qcopy(result, a, status);
2923 return;
2924 }
2925
2926 if (n >= 0) {
2927 mpd_qshiftl(result, a, n, status);
2928 _mpd_cap(result, ctx);
2929 }
2930 else {
2931 if (!mpd_qcopy(result, a, status)) {
2932 return;
2933 }
2934 _mpd_cap(result, ctx);
2935 mpd_qshiftr_inplace(result, -n);
2936 }
2937}
2938
2939/* Logical Xor */
2940void
2941mpd_qxor(mpd_t *result, const mpd_t *a, const mpd_t *b,
2942 const mpd_context_t *ctx, uint32_t *status)
2943{
2944 const mpd_t *big = a, *small = b;
2945 mpd_uint_t x, y, z, xbit, ybit;
2946 int k, mswdigits;
2947 mpd_ssize_t i;
2948
2949 if (mpd_isspecial(a) || mpd_isspecial(b) ||
2950 mpd_isnegative(a) || mpd_isnegative(b) ||
2951 a->exp != 0 || b->exp != 0) {
2952 mpd_seterror(result, MPD_Invalid_operation, status);
2953 return;
2954 }
2955 if (b->digits > a->digits) {
2956 big = b;
2957 small = a;
2958 }
2959 if (!mpd_qresize(result, big->len, status)) {
2960 return;
2961 }
2962
2963
2964 /* full words */
2965 for (i = 0; i < small->len-1; i++) {
2966 x = small->data[i];
2967 y = big->data[i];
2968 z = 0;
2969 for (k = 0; k < MPD_RDIGITS; k++) {
2970 xbit = x % 10;
2971 x /= 10;
2972 ybit = y % 10;
2973 y /= 10;
2974 if (xbit > 1 || ybit > 1) {
2975 goto invalid_operation;
2976 }
2977 z += (xbit^ybit) ? mpd_pow10[k] : 0;
2978 }
2979 result->data[i] = z;
2980 }
2981 /* most significant word of small */
2982 x = small->data[i];
2983 y = big->data[i];
2984 z = 0;
2985 mswdigits = mpd_word_digits(x);
2986 for (k = 0; k < mswdigits; k++) {
2987 xbit = x % 10;
2988 x /= 10;
2989 ybit = y % 10;
2990 y /= 10;
2991 if (xbit > 1 || ybit > 1) {
2992 goto invalid_operation;
2993 }
2994 z += (xbit^ybit) ? mpd_pow10[k] : 0;
2995 }
2996
Stefan Krahaecaf0b2012-04-18 18:08:20 +02002997 /* scan for digits > 1 and copy the rest of y */
Stefan Krah1919b7e2012-03-21 18:25:23 +01002998 for (; k < MPD_RDIGITS; k++) {
2999 ybit = y % 10;
3000 y /= 10;
3001 if (ybit > 1) {
3002 goto invalid_operation;
3003 }
3004 z += ybit*mpd_pow10[k];
3005 }
3006 result->data[i++] = z;
Stefan Krahaecaf0b2012-04-18 18:08:20 +02003007 /* scan for digits > 1 and copy the rest of big */
Stefan Krah1919b7e2012-03-21 18:25:23 +01003008 for (; i < big->len; i++) {
3009 y = big->data[i];
3010 for (k = 0; k < MPD_RDIGITS; k++) {
3011 ybit = y % 10;
3012 y /= 10;
3013 if (ybit > 1) {
3014 goto invalid_operation;
3015 }
3016 }
3017 result->data[i] = big->data[i];
3018 }
3019
3020 mpd_clear_flags(result);
3021 result->exp = 0;
3022 result->len = _mpd_real_size(result->data, big->len);
3023 mpd_qresize(result, result->len, status);
3024 mpd_setdigits(result);
3025 _mpd_cap(result, ctx);
3026 return;
3027
3028invalid_operation:
3029 mpd_seterror(result, MPD_Invalid_operation, status);
3030}
3031
3032
3033/******************************************************************************/
3034/* Arithmetic operations */
3035/******************************************************************************/
3036
3037/*
3038 * The absolute value of a. If a is negative, the result is the same
3039 * as the result of the minus operation. Otherwise, the result is the
3040 * result of the plus operation.
3041 */
3042void
3043mpd_qabs(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
3044 uint32_t *status)
3045{
3046 if (mpd_isspecial(a)) {
3047 if (mpd_qcheck_nan(result, a, ctx, status)) {
3048 return;
3049 }
3050 }
3051
3052 if (mpd_isnegative(a)) {
3053 mpd_qminus(result, a, ctx, status);
3054 }
3055 else {
3056 mpd_qplus(result, a, ctx, status);
3057 }
Stefan Krah1919b7e2012-03-21 18:25:23 +01003058}
3059
3060static inline void
3061_mpd_ptrswap(mpd_t **a, mpd_t **b)
3062{
3063 mpd_t *t = *a;
3064 *a = *b;
3065 *b = t;
3066}
3067
3068/* Add or subtract infinities. */
3069static void
3070_mpd_qaddsub_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
3071 uint32_t *status)
3072{
3073 if (mpd_isinfinite(a)) {
3074 if (mpd_sign(a) != sign_b && mpd_isinfinite(b)) {
3075 mpd_seterror(result, MPD_Invalid_operation, status);
3076 }
3077 else {
3078 mpd_setspecial(result, mpd_sign(a), MPD_INF);
3079 }
3080 return;
3081 }
3082 assert(mpd_isinfinite(b));
3083 mpd_setspecial(result, sign_b, MPD_INF);
3084}
3085
3086/* Add or subtract non-special numbers. */
3087static void
3088_mpd_qaddsub(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
3089 const mpd_context_t *ctx, uint32_t *status)
3090{
3091 mpd_t *big, *small;
3092 MPD_NEW_STATIC(big_aligned,0,0,0,0);
Stefan Krahed4b21f2012-04-18 18:45:22 +02003093 MPD_NEW_CONST(tiny,0,0,1,1,1,1);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003094 mpd_uint_t carry;
3095 mpd_ssize_t newsize, shift;
3096 mpd_ssize_t exp, i;
3097 int swap = 0;
3098
3099
3100 /* compare exponents */
3101 big = (mpd_t *)a; small = (mpd_t *)b;
3102 if (big->exp != small->exp) {
3103 if (small->exp > big->exp) {
3104 _mpd_ptrswap(&big, &small);
3105 swap++;
3106 }
Stefan Krah5d0d2e22012-04-18 18:59:56 +02003107 /* align the coefficients */
Stefan Krah1919b7e2012-03-21 18:25:23 +01003108 if (!mpd_iszerocoeff(big)) {
Stefan Krah1919b7e2012-03-21 18:25:23 +01003109 exp = big->exp - 1;
3110 exp += (big->digits > ctx->prec) ? 0 : big->digits-ctx->prec-1;
3111 if (mpd_adjexp(small) < exp) {
Stefan Krah5d0d2e22012-04-18 18:59:56 +02003112 /*
3113 * Avoid huge shifts by substituting a value for small that is
3114 * guaranteed to produce the same results.
3115 *
3116 * adjexp(small) < exp if and only if:
3117 *
3118 * bdigits <= prec AND
3119 * bdigits+shift >= prec+2+sdigits AND
3120 * exp = bexp+bdigits-prec-2
3121 *
3122 * 1234567000000000 -> bdigits + shift
3123 * ----------XX1234 -> sdigits
3124 * ----------X1 -> tiny-digits
3125 * |- prec -|
3126 *
3127 * OR
3128 *
3129 * bdigits > prec AND
3130 * shift > sdigits AND
3131 * exp = bexp-1
3132 *
3133 * 1234567892100000 -> bdigits + shift
3134 * ----------XX1234 -> sdigits
3135 * ----------X1 -> tiny-digits
3136 * |- prec -|
3137 *
3138 * If tiny is zero, adding or subtracting is a no-op.
3139 * Otherwise, adding tiny generates a non-zero digit either
3140 * below the rounding digit or the least significant digit
3141 * of big. When subtracting, tiny is in the same position as
3142 * the carry that would be generated by subtracting sdigits.
3143 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01003144 mpd_copy_flags(&tiny, small);
3145 tiny.exp = exp;
3146 tiny.digits = 1;
3147 tiny.len = 1;
3148 tiny.data[0] = mpd_iszerocoeff(small) ? 0 : 1;
3149 small = &tiny;
3150 }
Stefan Krah5d0d2e22012-04-18 18:59:56 +02003151 /* This cannot wrap: the difference is positive and <= maxprec */
Stefan Krah1919b7e2012-03-21 18:25:23 +01003152 shift = big->exp - small->exp;
3153 if (!mpd_qshiftl(&big_aligned, big, shift, status)) {
3154 mpd_seterror(result, MPD_Malloc_error, status);
3155 goto finish;
3156 }
3157 big = &big_aligned;
3158 }
3159 }
3160 result->exp = small->exp;
3161
3162
3163 /* compare length of coefficients */
3164 if (big->len < small->len) {
3165 _mpd_ptrswap(&big, &small);
3166 swap++;
3167 }
3168
3169 newsize = big->len;
3170 if (!mpd_qresize(result, newsize, status)) {
3171 goto finish;
3172 }
3173
3174 if (mpd_sign(a) == sign_b) {
3175
3176 carry = _mpd_baseadd(result->data, big->data, small->data,
3177 big->len, small->len);
3178
3179 if (carry) {
3180 newsize = big->len + 1;
3181 if (!mpd_qresize(result, newsize, status)) {
3182 goto finish;
3183 }
3184 result->data[newsize-1] = carry;
3185 }
3186
3187 result->len = newsize;
3188 mpd_set_flags(result, sign_b);
3189 }
3190 else {
3191 if (big->len == small->len) {
3192 for (i=big->len-1; i >= 0; --i) {
3193 if (big->data[i] != small->data[i]) {
3194 if (big->data[i] < small->data[i]) {
3195 _mpd_ptrswap(&big, &small);
3196 swap++;
3197 }
3198 break;
3199 }
3200 }
3201 }
3202
3203 _mpd_basesub(result->data, big->data, small->data,
3204 big->len, small->len);
3205 newsize = _mpd_real_size(result->data, big->len);
3206 /* resize to smaller cannot fail */
3207 (void)mpd_qresize(result, newsize, status);
3208
3209 result->len = newsize;
3210 sign_b = (swap & 1) ? sign_b : mpd_sign(a);
3211 mpd_set_flags(result, sign_b);
3212
3213 if (mpd_iszerocoeff(result)) {
3214 mpd_set_positive(result);
3215 if (ctx->round == MPD_ROUND_FLOOR) {
3216 mpd_set_negative(result);
3217 }
3218 }
3219 }
3220
3221 mpd_setdigits(result);
3222
3223finish:
3224 mpd_del(&big_aligned);
3225}
3226
3227/* Add a and b. No specials, no finalizing. */
3228static void
3229_mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
3230 const mpd_context_t *ctx, uint32_t *status)
3231{
3232 _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
3233}
3234
3235/* Subtract b from a. No specials, no finalizing. */
3236static void
3237_mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
3238 const mpd_context_t *ctx, uint32_t *status)
3239{
3240 _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
3241}
3242
3243/* Add a and b. */
3244void
3245mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
3246 const mpd_context_t *ctx, uint32_t *status)
3247{
3248 if (mpd_isspecial(a) || mpd_isspecial(b)) {
3249 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
3250 return;
3251 }
3252 _mpd_qaddsub_inf(result, a, b, mpd_sign(b), status);
3253 return;
3254 }
3255
3256 _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
3257 mpd_qfinalize(result, ctx, status);
3258}
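
/*
 * Illustrative sketch (not part of the library): addition aligns the
 * exponents before the coefficients are added. 1.23 [123E-2] plus
 * 4.5 [45E-1] becomes 123E-2 + 450E-2 = 573E-2, i.e. 5.73.
 */
static void
example_add(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(b,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t maxcontext;
    uint32_t status = 0;

    mpd_maxcontext(&maxcontext);
    _settriple(&a, MPD_POS, 123, -2);
    _settriple(&b, MPD_POS, 45, -1);
    mpd_qadd(&r, &a, &b, &maxcontext, &status);  /* r == 5.73 */

    mpd_del(&a);
    mpd_del(&b);
    mpd_del(&r);
}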
3259
Stefan Krah3c23a872012-04-20 19:59:20 +02003260/* Add a and b. Set NaN/Invalid_operation if the result is inexact. */
3261static void
3262_mpd_qadd_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
3263 const mpd_context_t *ctx, uint32_t *status)
3264{
3265 uint32_t workstatus = 0;
3266
3267 mpd_qadd(result, a, b, ctx, &workstatus);
3268 *status |= workstatus;
3269 if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
3270 mpd_seterror(result, MPD_Invalid_operation, status);
3271 }
3272}
3273
Stefan Krah1919b7e2012-03-21 18:25:23 +01003274/* Subtract b from a. */
3275void
3276mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
3277 const mpd_context_t *ctx, uint32_t *status)
3278{
3279 if (mpd_isspecial(a) || mpd_isspecial(b)) {
3280 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
3281 return;
3282 }
3283 _mpd_qaddsub_inf(result, a, b, !mpd_sign(b), status);
3284 return;
3285 }
3286
3287 _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
3288 mpd_qfinalize(result, ctx, status);
3289}
3290
Stefan Krah3c23a872012-04-20 19:59:20 +02003291/* Subtract b from a. Set NaN/Invalid_operation if the result is inexact. */
3292static void
3293_mpd_qsub_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
3294 const mpd_context_t *ctx, uint32_t *status)
3295{
3296 uint32_t workstatus = 0;
3297
3298 mpd_qsub(result, a, b, ctx, &workstatus);
3299 *status |= workstatus;
3300 if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
3301 mpd_seterror(result, MPD_Invalid_operation, status);
3302 }
3303}
3304
Stefan Krah1919b7e2012-03-21 18:25:23 +01003305/* Add decimal and mpd_ssize_t. */
3306void
3307mpd_qadd_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
3308 const mpd_context_t *ctx, uint32_t *status)
3309{
3310 mpd_context_t maxcontext;
3311 MPD_NEW_STATIC(bb,0,0,0,0);
3312
3313 mpd_maxcontext(&maxcontext);
3314 mpd_qsset_ssize(&bb, b, &maxcontext, status);
3315 mpd_qadd(result, a, &bb, ctx, status);
3316 mpd_del(&bb);
3317}
3318
3319/* Add decimal and mpd_uint_t. */
3320void
3321mpd_qadd_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
3322 const mpd_context_t *ctx, uint32_t *status)
3323{
3324 mpd_context_t maxcontext;
3325 MPD_NEW_STATIC(bb,0,0,0,0);
3326
3327 mpd_maxcontext(&maxcontext);
3328 mpd_qsset_uint(&bb, b, &maxcontext, status);
3329 mpd_qadd(result, a, &bb, ctx, status);
3330 mpd_del(&bb);
3331}
3332
3333/* Subtract mpd_ssize_t from decimal. */
3334void
3335mpd_qsub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
3336 const mpd_context_t *ctx, uint32_t *status)
3337{
3338 mpd_context_t maxcontext;
3339 MPD_NEW_STATIC(bb,0,0,0,0);
3340
3341 mpd_maxcontext(&maxcontext);
3342 mpd_qsset_ssize(&bb, b, &maxcontext, status);
3343 mpd_qsub(result, a, &bb, ctx, status);
3344 mpd_del(&bb);
3345}
3346
3347/* Subtract mpd_uint_t from decimal. */
3348void
3349mpd_qsub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
3350 const mpd_context_t *ctx, uint32_t *status)
3351{
3352 mpd_context_t maxcontext;
3353 MPD_NEW_STATIC(bb,0,0,0,0);
3354
3355 mpd_maxcontext(&maxcontext);
3356 mpd_qsset_uint(&bb, b, &maxcontext, status);
3357 mpd_qsub(result, a, &bb, ctx, status);
3358 mpd_del(&bb);
3359}
3360
3361/* Add decimal and int32_t. */
3362void
3363mpd_qadd_i32(mpd_t *result, const mpd_t *a, int32_t b,
3364 const mpd_context_t *ctx, uint32_t *status)
3365{
3366 mpd_qadd_ssize(result, a, b, ctx, status);
3367}
3368
3369/* Add decimal and uint32_t. */
3370void
3371mpd_qadd_u32(mpd_t *result, const mpd_t *a, uint32_t b,
3372 const mpd_context_t *ctx, uint32_t *status)
3373{
3374 mpd_qadd_uint(result, a, b, ctx, status);
3375}
3376
3377#ifdef CONFIG_64
3378/* Add decimal and int64_t. */
3379void
3380mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
3381 const mpd_context_t *ctx, uint32_t *status)
3382{
3383 mpd_qadd_ssize(result, a, b, ctx, status);
3384}
3385
3386/* Add decimal and uint64_t. */
3387void
3388mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3389 const mpd_context_t *ctx, uint32_t *status)
3390{
3391 mpd_qadd_uint(result, a, b, ctx, status);
3392}
3393#endif
3394
3395/* Subtract int32_t from decimal. */
3396void
3397mpd_qsub_i32(mpd_t *result, const mpd_t *a, int32_t b,
3398 const mpd_context_t *ctx, uint32_t *status)
3399{
3400 mpd_qsub_ssize(result, a, b, ctx, status);
3401}
3402
3403/* Subtract uint32_t from decimal. */
3404void
3405mpd_qsub_u32(mpd_t *result, const mpd_t *a, uint32_t b,
3406 const mpd_context_t *ctx, uint32_t *status)
3407{
3408 mpd_qsub_uint(result, a, b, ctx, status);
3409}
3410
3411#ifdef CONFIG_64
3412/* Subtract int64_t from decimal. */
3413void
3414mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
3415 const mpd_context_t *ctx, uint32_t *status)
3416{
3417 mpd_qsub_ssize(result, a, b, ctx, status);
3418}
3419
3420/* Subtract uint64_t from decimal. */
3421void
3422mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3423 const mpd_context_t *ctx, uint32_t *status)
3424{
3425 mpd_qsub_uint(result, a, b, ctx, status);
3426}
3427#endif
3428
3429
3430/* Divide infinities. */
3431static void
3432_mpd_qdiv_inf(mpd_t *result, const mpd_t *a, const mpd_t *b,
3433 const mpd_context_t *ctx, uint32_t *status)
3434{
3435 if (mpd_isinfinite(a)) {
3436 if (mpd_isinfinite(b)) {
3437 mpd_seterror(result, MPD_Invalid_operation, status);
3438 return;
3439 }
3440 mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
3441 return;
3442 }
3443 assert(mpd_isinfinite(b));
3444 _settriple(result, mpd_sign(a)^mpd_sign(b), 0, mpd_etiny(ctx));
3445 *status |= MPD_Clamped;
3446}
3447
3448enum {NO_IDEAL_EXP, SET_IDEAL_EXP};
3449/* Divide a by b. */
3450static void
3451_mpd_qdiv(int action, mpd_t *q, const mpd_t *a, const mpd_t *b,
3452 const mpd_context_t *ctx, uint32_t *status)
3453{
3454 MPD_NEW_STATIC(aligned,0,0,0,0);
3455 mpd_uint_t ld;
3456 mpd_ssize_t shift, exp, tz;
3457 mpd_ssize_t newsize;
3458 mpd_ssize_t ideal_exp;
3459 mpd_uint_t rem;
3460 uint8_t sign_a = mpd_sign(a);
3461 uint8_t sign_b = mpd_sign(b);
3462
3463
3464 if (mpd_isspecial(a) || mpd_isspecial(b)) {
3465 if (mpd_qcheck_nans(q, a, b, ctx, status)) {
3466 return;
3467 }
3468 _mpd_qdiv_inf(q, a, b, ctx, status);
3469 return;
3470 }
3471 if (mpd_iszerocoeff(b)) {
3472 if (mpd_iszerocoeff(a)) {
3473 mpd_seterror(q, MPD_Division_undefined, status);
3474 }
3475 else {
3476 mpd_setspecial(q, sign_a^sign_b, MPD_INF);
3477 *status |= MPD_Division_by_zero;
3478 }
3479 return;
3480 }
3481 if (mpd_iszerocoeff(a)) {
3482 exp = a->exp - b->exp;
3483 _settriple(q, sign_a^sign_b, 0, exp);
3484 mpd_qfinalize(q, ctx, status);
3485 return;
3486 }
3487
3488 shift = (b->digits - a->digits) + ctx->prec + 1;
3489 ideal_exp = a->exp - b->exp;
3490 exp = ideal_exp - shift;
3491 if (shift > 0) {
3492 if (!mpd_qshiftl(&aligned, a, shift, status)) {
3493 mpd_seterror(q, MPD_Malloc_error, status);
3494 goto finish;
3495 }
3496 a = &aligned;
3497 }
3498 else if (shift < 0) {
3499 shift = -shift;
3500 if (!mpd_qshiftl(&aligned, b, shift, status)) {
3501 mpd_seterror(q, MPD_Malloc_error, status);
3502 goto finish;
3503 }
3504 b = &aligned;
3505 }
3506
3507
3508 newsize = a->len - b->len + 1;
3509 if ((q != b && q != a) || (q == b && newsize > b->len)) {
3510 if (!mpd_qresize(q, newsize, status)) {
3511 mpd_seterror(q, MPD_Malloc_error, status);
3512 goto finish;
3513 }
3514 }
3515
3516
3517 if (b->len == 1) {
3518 rem = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
3519 }
3520 else if (a->len < 2*MPD_NEWTONDIV_CUTOFF &&
3521 b->len < MPD_NEWTONDIV_CUTOFF) {
3522 int ret = _mpd_basedivmod(q->data, NULL, a->data, b->data,
3523 a->len, b->len);
3524 if (ret < 0) {
3525 mpd_seterror(q, MPD_Malloc_error, status);
3526 goto finish;
3527 }
3528 rem = ret;
3529 }
3530 else {
3531 MPD_NEW_STATIC(r,0,0,0,0);
Stefan Krah3c23a872012-04-20 19:59:20 +02003532 _mpd_base_ndivmod(q, &r, a, b, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003533 if (mpd_isspecial(q) || mpd_isspecial(&r)) {
Stefan Krah9d3a5ae2012-04-20 21:00:31 +02003534 mpd_setspecial(q, MPD_POS, MPD_NAN);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003535 mpd_del(&r);
3536 goto finish;
3537 }
3538 rem = !mpd_iszerocoeff(&r);
3539 mpd_del(&r);
3540 newsize = q->len;
3541 }
3542
3543 newsize = _mpd_real_size(q->data, newsize);
3544 /* resize to smaller cannot fail */
3545 mpd_qresize(q, newsize, status);
Stefan Krahdc36efa2012-04-07 15:57:59 +02003546 mpd_set_flags(q, sign_a^sign_b);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003547 q->len = newsize;
3548 mpd_setdigits(q);
3549
3550 shift = ideal_exp - exp;
3551 if (rem) {
3552 ld = mpd_lsd(q->data[0]);
3553 if (ld == 0 || ld == 5) {
3554 q->data[0] += 1;
3555 }
3556 }
3557 else if (action == SET_IDEAL_EXP && shift > 0) {
3558 tz = mpd_trail_zeros(q);
3559 shift = (tz > shift) ? shift : tz;
3560 mpd_qshiftr_inplace(q, shift);
3561 exp += shift;
3562 }
3563
Stefan Krah1919b7e2012-03-21 18:25:23 +01003564 q->exp = exp;
3565
3566
3567finish:
3568 mpd_del(&aligned);
3569 mpd_qfinalize(q, ctx, status);
3570}
3571
3572/* Divide a by b. */
3573void
3574mpd_qdiv(mpd_t *q, const mpd_t *a, const mpd_t *b,
3575 const mpd_context_t *ctx, uint32_t *status)
3576{
3577 _mpd_qdiv(SET_IDEAL_EXP, q, a, b, ctx, status);
3578}
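
/*
 * Illustrative sketch (not part of the library): division rounds to the
 * context precision and reports inexactness through the status word.
 * With a precision of 5 and round-half-even, 1/3 yields 0.33333 and sets
 * MPD_Inexact|MPD_Rounded.
 */
static void
example_div(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(b,0,0,0,0);
    MPD_NEW_STATIC(q,0,0,0,0);
    mpd_context_t ctx;
    uint32_t status = 0;

    mpd_maxcontext(&ctx);
    ctx.prec = 5;
    ctx.round = MPD_ROUND_HALF_EVEN;
    _settriple(&a, MPD_POS, 1, 0);
    _settriple(&b, MPD_POS, 3, 0);
    mpd_qdiv(&q, &a, &b, &ctx, &status);  /* q == 0.33333 */

    mpd_del(&a);
    mpd_del(&b);
    mpd_del(&q);
}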
3579
3580/* Internal function. */
3581static void
3582_mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
Stefan Krah5d0d2e22012-04-18 18:59:56 +02003583 const mpd_context_t *ctx, uint32_t *status)
Stefan Krah1919b7e2012-03-21 18:25:23 +01003584{
3585 MPD_NEW_STATIC(aligned,0,0,0,0);
3586 mpd_ssize_t qsize, rsize;
3587 mpd_ssize_t ideal_exp, expdiff, shift;
3588 uint8_t sign_a = mpd_sign(a);
3589 uint8_t sign_ab = mpd_sign(a)^mpd_sign(b);
3590
3591
3592 ideal_exp = (a->exp > b->exp) ? b->exp : a->exp;
3593 if (mpd_iszerocoeff(a)) {
3594 if (!mpd_qcopy(r, a, status)) {
3595 goto nanresult; /* GCOV_NOT_REACHED */
3596 }
3597 r->exp = ideal_exp;
3598 _settriple(q, sign_ab, 0, 0);
3599 return;
3600 }
3601
3602 expdiff = mpd_adjexp(a) - mpd_adjexp(b);
3603 if (expdiff < 0) {
3604 if (a->exp > b->exp) {
3605 /* positive and less than b->digits - a->digits */
3606 shift = a->exp - b->exp;
3607 if (!mpd_qshiftl(r, a, shift, status)) {
3608 goto nanresult;
3609 }
3610 r->exp = ideal_exp;
3611 }
3612 else {
3613 if (!mpd_qcopy(r, a, status)) {
3614 goto nanresult;
3615 }
3616 }
3617 _settriple(q, sign_ab, 0, 0);
3618 return;
3619 }
3620 if (expdiff > ctx->prec) {
3621 *status |= MPD_Division_impossible;
3622 goto nanresult;
3623 }
3624
3625
3626 /*
3627 * At this point we have:
3628 * (1) 0 <= a->exp + a->digits - b->exp - b->digits <= prec
3629 * (2) a->exp - b->exp >= b->digits - a->digits
3630 * (3) a->exp - b->exp <= prec + b->digits - a->digits
3631 */
3632 if (a->exp != b->exp) {
3633 shift = a->exp - b->exp;
3634 if (shift > 0) {
3635 /* by (3), after the shift a->digits <= prec + b->digits */
3636 if (!mpd_qshiftl(&aligned, a, shift, status)) {
3637 goto nanresult;
3638 }
3639 a = &aligned;
3640 }
3641 else {
3642 shift = -shift;
3643 /* by (2), after the shift b->digits <= a->digits */
3644 if (!mpd_qshiftl(&aligned, b, shift, status)) {
3645 goto nanresult;
3646 }
3647 b = &aligned;
3648 }
3649 }
3650
3651
3652 qsize = a->len - b->len + 1;
3653 if (!(q == a && qsize < a->len) && !(q == b && qsize < b->len)) {
3654 if (!mpd_qresize(q, qsize, status)) {
3655 goto nanresult;
3656 }
3657 }
3658
3659 rsize = b->len;
3660 if (!(r == a && rsize < a->len)) {
3661 if (!mpd_qresize(r, rsize, status)) {
3662 goto nanresult;
3663 }
3664 }
3665
3666 if (b->len == 1) {
3667 if (a->len == 1) {
3668 _mpd_div_word(&q->data[0], &r->data[0], a->data[0], b->data[0]);
3669 }
3670 else {
3671 r->data[0] = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
3672 }
3673 }
3674 else if (a->len < 2*MPD_NEWTONDIV_CUTOFF &&
3675 b->len < MPD_NEWTONDIV_CUTOFF) {
3676 int ret;
3677 ret = _mpd_basedivmod(q->data, r->data, a->data, b->data,
3678 a->len, b->len);
3679 if (ret == -1) {
3680 *status |= MPD_Malloc_error;
3681 goto nanresult;
3682 }
3683 }
3684 else {
Stefan Krah3c23a872012-04-20 19:59:20 +02003685 _mpd_base_ndivmod(q, r, a, b, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003686 if (mpd_isspecial(q) || mpd_isspecial(r)) {
3687 goto nanresult;
3688 }
Stefan Krah1919b7e2012-03-21 18:25:23 +01003689 qsize = q->len;
3690 rsize = r->len;
3691 }
3692
3693 qsize = _mpd_real_size(q->data, qsize);
3694 /* resize to smaller cannot fail */
3695 mpd_qresize(q, qsize, status);
3696 q->len = qsize;
3697 mpd_setdigits(q);
3698 mpd_set_flags(q, sign_ab);
3699 q->exp = 0;
3700 if (q->digits > ctx->prec) {
3701 *status |= MPD_Division_impossible;
3702 goto nanresult;
3703 }
3704
3705 rsize = _mpd_real_size(r->data, rsize);
3706 /* resize to smaller cannot fail */
3707 mpd_qresize(r, rsize, status);
3708 r->len = rsize;
3709 mpd_setdigits(r);
3710 mpd_set_flags(r, sign_a);
3711 r->exp = ideal_exp;
3712
3713out:
3714 mpd_del(&aligned);
3715 return;
3716
3717nanresult:
3718 mpd_setspecial(q, MPD_POS, MPD_NAN);
3719 mpd_setspecial(r, MPD_POS, MPD_NAN);
3720 goto out;
3721}
3722
3723/* Integer division with remainder. */
3724void
3725mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
3726 const mpd_context_t *ctx, uint32_t *status)
3727{
3728 uint8_t sign = mpd_sign(a)^mpd_sign(b);
3729
3730 if (mpd_isspecial(a) || mpd_isspecial(b)) {
3731 if (mpd_qcheck_nans(q, a, b, ctx, status)) {
3732 mpd_qcopy(r, q, status);
3733 return;
3734 }
3735 if (mpd_isinfinite(a)) {
3736 if (mpd_isinfinite(b)) {
3737 mpd_setspecial(q, MPD_POS, MPD_NAN);
3738 }
3739 else {
3740 mpd_setspecial(q, sign, MPD_INF);
3741 }
3742 mpd_setspecial(r, MPD_POS, MPD_NAN);
3743 *status |= MPD_Invalid_operation;
3744 return;
3745 }
3746 if (mpd_isinfinite(b)) {
3747 if (!mpd_qcopy(r, a, status)) {
3748 mpd_seterror(q, MPD_Malloc_error, status);
3749 return;
3750 }
3751 mpd_qfinalize(r, ctx, status);
3752 _settriple(q, sign, 0, 0);
3753 return;
3754 }
3755 /* debug */
3756 abort(); /* GCOV_NOT_REACHED */
3757 }
3758 if (mpd_iszerocoeff(b)) {
3759 if (mpd_iszerocoeff(a)) {
3760 mpd_setspecial(q, MPD_POS, MPD_NAN);
3761 mpd_setspecial(r, MPD_POS, MPD_NAN);
3762 *status |= MPD_Division_undefined;
3763 }
3764 else {
3765 mpd_setspecial(q, sign, MPD_INF);
3766 mpd_setspecial(r, MPD_POS, MPD_NAN);
3767 *status |= (MPD_Division_by_zero|MPD_Invalid_operation);
3768 }
3769 return;
3770 }
3771
3772 _mpd_qdivmod(q, r, a, b, ctx, status);
3773 mpd_qfinalize(q, ctx, status);
3774 mpd_qfinalize(r, ctx, status);
3775}
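
/*
 * Illustrative sketch (not part of the library): integer division with
 * remainder. For 10 and 3 the quotient is 3 and the remainder is 1,
 * with 10 == 3*3 + 1.
 */
static void
example_divmod(void)
{
    MPD_NEW_STATIC(a,0,0,0,0);
    MPD_NEW_STATIC(b,0,0,0,0);
    MPD_NEW_STATIC(q,0,0,0,0);
    MPD_NEW_STATIC(r,0,0,0,0);
    mpd_context_t maxcontext;
    uint32_t status = 0;

    mpd_maxcontext(&maxcontext);
    _settriple(&a, MPD_POS, 10, 0);
    _settriple(&b, MPD_POS, 3, 0);
    mpd_qdivmod(&q, &r, &a, &b, &maxcontext, &status);  /* q == 3, r == 1 */

    mpd_del(&a);
    mpd_del(&b);
    mpd_del(&q);
    mpd_del(&r);
}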
3776
3777void
3778mpd_qdivint(mpd_t *q, const mpd_t *a, const mpd_t *b,
3779 const mpd_context_t *ctx, uint32_t *status)
3780{
3781 MPD_NEW_STATIC(r,0,0,0,0);
3782 uint8_t sign = mpd_sign(a)^mpd_sign(b);
3783
3784 if (mpd_isspecial(a) || mpd_isspecial(b)) {
3785 if (mpd_qcheck_nans(q, a, b, ctx, status)) {
3786 return;
3787 }
3788 if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
3789 mpd_seterror(q, MPD_Invalid_operation, status);
3790 return;
3791 }
3792 if (mpd_isinfinite(a)) {
3793 mpd_setspecial(q, sign, MPD_INF);
3794 return;
3795 }
3796 if (mpd_isinfinite(b)) {
3797 _settriple(q, sign, 0, 0);
3798 return;
3799 }
3800 /* debug */
3801 abort(); /* GCOV_NOT_REACHED */
3802 }
3803 if (mpd_iszerocoeff(b)) {
3804 if (mpd_iszerocoeff(a)) {
3805 mpd_seterror(q, MPD_Division_undefined, status);
3806 }
3807 else {
3808 mpd_setspecial(q, sign, MPD_INF);
3809 *status |= MPD_Division_by_zero;
3810 }
3811 return;
3812 }
3813
3814
3815 _mpd_qdivmod(q, &r, a, b, ctx, status);
3816 mpd_del(&r);
3817 mpd_qfinalize(q, ctx, status);
3818}
3819
3820/* Divide decimal by mpd_ssize_t. */
3821void
3822mpd_qdiv_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
3823 const mpd_context_t *ctx, uint32_t *status)
3824{
3825 mpd_context_t maxcontext;
3826 MPD_NEW_STATIC(bb,0,0,0,0);
3827
3828 mpd_maxcontext(&maxcontext);
3829 mpd_qsset_ssize(&bb, b, &maxcontext, status);
3830 mpd_qdiv(result, a, &bb, ctx, status);
3831 mpd_del(&bb);
3832}
3833
3834/* Divide decimal by mpd_uint_t. */
3835void
3836mpd_qdiv_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
3837 const mpd_context_t *ctx, uint32_t *status)
3838{
3839 mpd_context_t maxcontext;
3840 MPD_NEW_STATIC(bb,0,0,0,0);
3841
3842 mpd_maxcontext(&maxcontext);
3843 mpd_qsset_uint(&bb, b, &maxcontext, status);
3844 mpd_qdiv(result, a, &bb, ctx, status);
3845 mpd_del(&bb);
3846}
3847
3848/* Divide decimal by int32_t. */
3849void
3850mpd_qdiv_i32(mpd_t *result, const mpd_t *a, int32_t b,
3851 const mpd_context_t *ctx, uint32_t *status)
3852{
3853 mpd_qdiv_ssize(result, a, b, ctx, status);
3854}
3855
3856/* Divide decimal by uint32_t. */
3857void
3858mpd_qdiv_u32(mpd_t *result, const mpd_t *a, uint32_t b,
3859 const mpd_context_t *ctx, uint32_t *status)
3860{
3861 mpd_qdiv_uint(result, a, b, ctx, status);
3862}
3863
3864#ifdef CONFIG_64
3865/* Divide decimal by int64_t. */
3866void
3867mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
3868 const mpd_context_t *ctx, uint32_t *status)
3869{
3870 mpd_qdiv_ssize(result, a, b, ctx, status);
3871}
3872
3873/* Divide decimal by uint64_t. */
3874void
3875mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
3876 const mpd_context_t *ctx, uint32_t *status)
3877{
3878 mpd_qdiv_uint(result, a, b, ctx, status);
3879}
3880#endif
3881
Stefan Krah696d10f2012-05-16 20:10:21 +02003882/* Pad the result with trailing zeros if it has fewer digits than prec. */
3883static void
3884_mpd_zeropad(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
3885{
3886 if (!mpd_isspecial(result) && !mpd_iszero(result) &&
3887 result->digits < ctx->prec) {
3888 mpd_ssize_t shift = ctx->prec - result->digits;
3889 mpd_qshiftl(result, result, shift, status);
3890 result->exp -= shift;
3891 }
3892}
3893
3894/* Check if the result is guaranteed to be one. */
3895static int
3896_mpd_qexp_check_one(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
3897 uint32_t *status)
3898{
3899 MPD_NEW_CONST(lim,0,-(ctx->prec+1),1,1,1,9);
3900 MPD_NEW_SHARED(aa, a);
3901
3902 mpd_set_positive(&aa);
3903
3904 /* abs(a) <= 9 * 10**(-prec-1) */
3905 if (_mpd_cmp(&aa, &lim) <= 0) {
3906 _settriple(result, 0, 1, 0);
Stefan Krah30c35e82012-05-31 15:09:27 +02003907 *status |= MPD_Rounded|MPD_Inexact;
Stefan Krah696d10f2012-05-16 20:10:21 +02003908 return 1;
3909 }
3910
3911 return 0;
3912}
3913
Stefan Krah1919b7e2012-03-21 18:25:23 +01003914/*
3915 * Get the number of iterations for the Horner scheme in _mpd_qexp().
3916 */
3917static inline mpd_ssize_t
Stefan Krah696d10f2012-05-16 20:10:21 +02003918_mpd_get_exp_iterations(const mpd_t *r, mpd_ssize_t p)
Stefan Krah1919b7e2012-03-21 18:25:23 +01003919{
Stefan Krah696d10f2012-05-16 20:10:21 +02003920 mpd_ssize_t log10pbyr; /* lower bound for log10(p / abs(r)) */
3921 mpd_ssize_t n;
Stefan Krah1919b7e2012-03-21 18:25:23 +01003922
Stefan Krah696d10f2012-05-16 20:10:21 +02003923 assert(p >= 10);
3924 assert(!mpd_iszero(r));
3925 assert(-p < mpd_adjexp(r) && mpd_adjexp(r) <= -1);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003926
3927#ifdef CONFIG_64
Stefan Krah696d10f2012-05-16 20:10:21 +02003928 if (p > (mpd_ssize_t)(1ULL<<52)) {
Stefan Krah1919b7e2012-03-21 18:25:23 +01003929 return MPD_SSIZE_MAX;
3930 }
Stefan Krah1919b7e2012-03-21 18:25:23 +01003931#endif
Stefan Krah696d10f2012-05-16 20:10:21 +02003932
3933 /*
3934 * Lower bound for log10(p / abs(r)): adjexp(p) - (adjexp(r) + 1)
3935 * At this point (for CONFIG_64, CONFIG_32 is not problematic):
3936 * 1) 10 <= p <= 2**52
3937 * 2) -p < adjexp(r) <= -1
3938 * 3) 1 <= log10pbyr <= 2**52 + 14
3939 */
3940 log10pbyr = (mpd_word_digits(p)-1) - (mpd_adjexp(r)+1);
3941
3942 /*
3943 * The numerator in the paper is 1.435 * p - 1.182, calculated
3944 * exactly. We compensate for rounding errors by using 1.43503.
3945 * ACL2 proofs:
3946 * 1) exp-iter-approx-lower-bound: The term below evaluated
3947 * in 53-bit floating point arithmetic is greater than or
3948 * equal to the exact term used in the paper.
3949 * 2) exp-iter-approx-upper-bound: The term below is less than
3950 * or equal to 3/2 * p <= 3/2 * 2**52.
3951 */
3952 n = (mpd_ssize_t)ceil((1.43503*(double)p - 1.182) / (double)log10pbyr);
3953 return n >= 3 ? n : 3;
Stefan Krah1919b7e2012-03-21 18:25:23 +01003954}
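
/*
 * Worked example for the bound above (illustrative): with p = 28 and
 * adjexp(r) = -1, log10pbyr = (mpd_word_digits(28)-1) - (adjexp(r)+1)
 * = 1 - 0 = 1, so n = ceil((1.43503*28 - 1.182) / 1) = ceil(38.99884)
 * = 39 Horner iterations.
 */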
3955
3956/*
Stefan Krah9a5beec2012-05-31 16:21:34 +02003957 * Internal function, specials have been dealt with. Apart from Overflow
3958 * and Underflow, two cases must be considered for the error of the result:
3959 *
3960 * 1) abs(a) <= 9 * 10**(-prec-1) ==> result == 1
3961 *
3962 * Absolute error: abs(1 - e**x) < 10**(-prec)
3963 * -------------------------------------------
3964 *
3965 * 2) abs(a) > 9 * 10**(-prec-1)
3966 *
3967 * Relative error: abs(result - e**x) < 0.5 * 10**(-prec) * e**x
3968 * -------------------------------------------------------------
Stefan Krah1919b7e2012-03-21 18:25:23 +01003969 *
3970 * The algorithm is from Hull&Abrham, Variable Precision Exponential Function,
3971 * ACM Transactions on Mathematical Software, Vol. 12, No. 2, June 1986.
3972 *
3973 * Main differences:
3974 *
Stefan Krah696d10f2012-05-16 20:10:21 +02003975 * - The number of iterations for the Horner scheme is calculated using
3976 * 53-bit floating point arithmetic.
3977 *
3978 * - In the error analysis for ER (relative error accumulated in the
3979 * evaluation of the truncated series) the reduced operand r may
3980 * have any number of digits.
3981 * ACL2 proof: exponent-relative-error
Stefan Krah1919b7e2012-03-21 18:25:23 +01003982 *
3983 * - The analysis for early abortion has been adapted for the mpd_t
3984 * ranges.
3985 */
3986static void
3987_mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
3988 uint32_t *status)
3989{
3990 mpd_context_t workctx;
3991 MPD_NEW_STATIC(tmp,0,0,0,0);
3992 MPD_NEW_STATIC(sum,0,0,0,0);
Stefan Krah67ee1d02012-06-01 10:58:16 +02003993 MPD_NEW_CONST(word,0,0,1,1,1,1);
Stefan Krah1919b7e2012-03-21 18:25:23 +01003994 mpd_ssize_t j, n, t;
3995
3996 assert(!mpd_isspecial(a));
3997
Stefan Krah696d10f2012-05-16 20:10:21 +02003998 if (mpd_iszerocoeff(a)) {
3999 _settriple(result, MPD_POS, 1, 0);
4000 return;
4001 }
4002
Stefan Krah1919b7e2012-03-21 18:25:23 +01004003 /*
Stefan Krah696d10f2012-05-16 20:10:21 +02004004 * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where abs(r) < 1 and t >= 0.
Stefan Krah1919b7e2012-03-21 18:25:23 +01004005 *
4006 * If t > 0, we have:
4007 *
Stefan Krah696d10f2012-05-16 20:10:21 +02004008 * (1) 0.1 <= r < 1, so e^0.1 <= e^r. If t > MAX_T, overflow occurs:
Stefan Krah1919b7e2012-03-21 18:25:23 +01004009 *
Stefan Krah696d10f2012-05-16 20:10:21 +02004010 * MAX-EMAX+1 < log10(e^(0.1*10*t)) <= log10(e^(r*10^t)) < adjexp(e^(r*10^t))+1
4011 *
Stefan Krah9a5beec2012-05-31 16:21:34 +02004012 * (2) -1 < r <= -0.1, so e^r <= e^-0.1. If t > MAX_T, underflow occurs:
Stefan Krah696d10f2012-05-16 20:10:21 +02004013 *
Stefan Krah9a5beec2012-05-31 16:21:34 +02004014 * adjexp(e^(r*10^t)) <= log10(e^(r*10^t)) <= log10(e^(-0.1*10^t)) < MIN-ETINY
Stefan Krah1919b7e2012-03-21 18:25:23 +01004015 */
4016#if defined(CONFIG_64)
4017 #define MPD_EXP_MAX_T 19
4018#elif defined(CONFIG_32)
4019 #define MPD_EXP_MAX_T 10
4020#endif
4021 t = a->digits + a->exp;
4022 t = (t > 0) ? t : 0;
4023 if (t > MPD_EXP_MAX_T) {
4024 if (mpd_ispositive(a)) {
4025 mpd_setspecial(result, MPD_POS, MPD_INF);
4026 *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
4027 }
4028 else {
4029 _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
4030 *status |= (MPD_Inexact|MPD_Rounded|MPD_Subnormal|
4031 MPD_Underflow|MPD_Clamped);
4032 }
4033 return;
4034 }
4035
Stefan Krah696d10f2012-05-16 20:10:21 +02004036 /* abs(a) <= 9 * 10**(-prec-1) */
4037 if (_mpd_qexp_check_one(result, a, ctx, status)) {
4038 return;
4039 }
4040
Stefan Krah1919b7e2012-03-21 18:25:23 +01004041 mpd_maxcontext(&workctx);
4042 workctx.prec = ctx->prec + t + 2;
Stefan Krah696d10f2012-05-16 20:10:21 +02004043 workctx.prec = (workctx.prec < 10) ? 10 : workctx.prec;
Stefan Krah1919b7e2012-03-21 18:25:23 +01004044 workctx.round = MPD_ROUND_HALF_EVEN;
4045
Stefan Krah1919b7e2012-03-21 18:25:23 +01004046 if (!mpd_qcopy(result, a, status)) {
Stefan Krah696d10f2012-05-16 20:10:21 +02004047 return;
Stefan Krah1919b7e2012-03-21 18:25:23 +01004048 }
4049 result->exp -= t;
4050
Stefan Krah696d10f2012-05-16 20:10:21 +02004051 /*
4052 * At this point:
4053 * 1) 9 * 10**(-prec-1) < abs(a)
4054 * 2) 9 * 10**(-prec-t-1) < abs(r)
4055 * 3) log10(9) - prec - t - 1 < log10(abs(r)) < adjexp(abs(r)) + 1
4056 * 4) - prec - t - 2 < adjexp(abs(r)) <= -1
4057 */
4058 n = _mpd_get_exp_iterations(result, workctx.prec);
4059 if (n == MPD_SSIZE_MAX) {
4060 mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */
4061 return; /* GCOV_UNLIKELY */
4062 }
4063
Stefan Krah1919b7e2012-03-21 18:25:23 +01004064 _settriple(&sum, MPD_POS, 1, 0);
4065
4066 for (j = n-1; j >= 1; j--) {
4067 word.data[0] = j;
4068 mpd_setdigits(&word);
4069 mpd_qdiv(&tmp, result, &word, &workctx, &workctx.status);
Stefan Krah696d10f2012-05-16 20:10:21 +02004070 mpd_qfma(&sum, &sum, &tmp, &one, &workctx, &workctx.status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004071 }
4072
4073#ifdef CONFIG_64
4074 _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
4075#else
4076 if (t <= MPD_MAX_POW10) {
4077 _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
4078 }
4079 else {
4080 t -= MPD_MAX_POW10;
4081 _mpd_qpow_uint(&tmp, &sum, mpd_pow10[MPD_MAX_POW10], MPD_POS,
4082 &workctx, status);
4083 _mpd_qpow_uint(result, &tmp, mpd_pow10[t], MPD_POS, &workctx, status);
4084 }
4085#endif
4086
Stefan Krah1919b7e2012-03-21 18:25:23 +01004087 mpd_del(&tmp);
4088 mpd_del(&sum);
4089 *status |= (workctx.status&MPD_Errors);
4090 *status |= (MPD_Inexact|MPD_Rounded);
4091}
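
/*
 * Worked example for the reduction above (illustrative): for a = 2.5,
 * i.e. the coefficient 25 with exponent -1, t = digits + exp = 2 - 1 = 1
 * and r = 25E-2 = 0.25, so e^2.5 is evaluated as (e^0.25)^(10^1).
 */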
4092
4093/* exp(a) */
4094void
4095mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4096 uint32_t *status)
4097{
4098 mpd_context_t workctx;
4099
4100 if (mpd_isspecial(a)) {
4101 if (mpd_qcheck_nan(result, a, ctx, status)) {
4102 return;
4103 }
4104 if (mpd_isnegative(a)) {
4105 _settriple(result, MPD_POS, 0, 0);
4106 }
4107 else {
4108 mpd_setspecial(result, MPD_POS, MPD_INF);
4109 }
4110 return;
4111 }
4112 if (mpd_iszerocoeff(a)) {
4113 _settriple(result, MPD_POS, 1, 0);
4114 return;
4115 }
4116
4117 workctx = *ctx;
4118 workctx.round = MPD_ROUND_HALF_EVEN;
4119
4120 if (ctx->allcr) {
4121 MPD_NEW_STATIC(t1, 0,0,0,0);
4122 MPD_NEW_STATIC(t2, 0,0,0,0);
4123 MPD_NEW_STATIC(ulp, 0,0,0,0);
4124 MPD_NEW_STATIC(aa, 0,0,0,0);
4125 mpd_ssize_t prec;
Stefan Krah4d3e0a62012-05-31 20:01:05 +02004126 mpd_ssize_t ulpexp;
4127 uint32_t workstatus;
Stefan Krah1919b7e2012-03-21 18:25:23 +01004128
4129 if (result == a) {
4130 if (!mpd_qcopy(&aa, a, status)) {
4131 mpd_seterror(result, MPD_Malloc_error, status);
4132 return;
4133 }
4134 a = &aa;
4135 }
4136
4137 workctx.clamp = 0;
4138 prec = ctx->prec + 3;
4139 while (1) {
4140 workctx.prec = prec;
Stefan Krah4d3e0a62012-05-31 20:01:05 +02004141 workstatus = 0;
4142
4143 _mpd_qexp(result, a, &workctx, &workstatus);
4144 *status |= workstatus;
4145
4146 ulpexp = result->exp + result->digits - workctx.prec;
4147 if (workstatus & MPD_Underflow) {
4148 /* The effective work precision is result->digits. */
4149 ulpexp = result->exp;
4150 }
4151 _ssettriple(&ulp, MPD_POS, 1, ulpexp);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004152
Stefan Krah696d10f2012-05-16 20:10:21 +02004153 /*
Stefan Krah4d3e0a62012-05-31 20:01:05 +02004154 * At this point [1]:
Stefan Krah696d10f2012-05-16 20:10:21 +02004155 * 1) abs(result - e**x) < 0.5 * 10**(-prec) * e**x
4156 * 2) result - ulp < e**x < result + ulp
4157 * 3) result - ulp < result < result + ulp
4158 *
4159 * If round(result-ulp)==round(result+ulp), then
4160 * round(result)==round(e**x). Therefore the result
4161 * is correctly rounded.
Stefan Krah4d3e0a62012-05-31 20:01:05 +02004162 *
4163 * [1] If abs(a) <= 9 * 10**(-prec-1), use the absolute
4164 * error for a similar argument.
Stefan Krah696d10f2012-05-16 20:10:21 +02004165 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004166 workctx.prec = ctx->prec;
4167 mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
4168 mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
4169 if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
4170 mpd_qcmp(&t1, &t2, status) == 0) {
4171 workctx.clamp = ctx->clamp;
Stefan Krah02717662012-05-31 20:49:24 +02004172 _mpd_zeropad(result, &workctx, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004173 mpd_check_underflow(result, &workctx, status);
4174 mpd_qfinalize(result, &workctx, status);
4175 break;
4176 }
4177 prec += MPD_RDIGITS;
4178 }
4179 mpd_del(&t1);
4180 mpd_del(&t2);
4181 mpd_del(&ulp);
4182 mpd_del(&aa);
4183 }
4184 else {
4185 _mpd_qexp(result, a, &workctx, status);
Stefan Krah02717662012-05-31 20:49:24 +02004186 _mpd_zeropad(result, &workctx, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004187 mpd_check_underflow(result, &workctx, status);
4188 mpd_qfinalize(result, &workctx, status);
4189 }
4190}
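
/*
 * Usage sketch, not part of the library: calling the public entry point
 * above. mpd_qnew(), mpd_qset_string() and mpd_to_sci() are assumed from
 * the documented libmpdec API; error handling is omitted.
 */
#if 0
#include <stdio.h>
#include "mpdecimal.h"

int main(void)
{
    mpd_context_t ctx;
    uint32_t status = 0;
    mpd_t *x, *r;
    char *s;

    mpd_maxcontext(&ctx);
    ctx.prec = 38;                     /* desired result precision */

    x = mpd_qnew();
    r = mpd_qnew();
    mpd_qset_string(x, "2", &ctx, &status);

    mpd_qexp(r, x, &ctx, &status);     /* e**2 = 7.38905609893065... */

    s = mpd_to_sci(r, 1);
    printf("%s  (status: 0x%x)\n", s, (unsigned)status);

    mpd_free(s);
    mpd_del(x);
    mpd_del(r);
    return 0;
}
#endif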
4191
4192/* Fused multiply-add: (a * b) + c, with a single final rounding. */
4193void
4194mpd_qfma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
4195 const mpd_context_t *ctx, uint32_t *status)
4196{
4197 uint32_t workstatus = 0;
4198 mpd_t *cc = (mpd_t *)c;
4199
4200 if (result == c) {
4201 if ((cc = mpd_qncopy(c)) == NULL) {
4202 mpd_seterror(result, MPD_Malloc_error, status);
4203 return;
4204 }
4205 }
4206
4207 _mpd_qmul(result, a, b, ctx, &workstatus);
4208 if (!(workstatus&MPD_Invalid_operation)) {
4209 mpd_qadd(result, result, cc, ctx, &workstatus);
4210 }
4211
4212 if (cc != c) mpd_del(cc);
4213 *status |= workstatus;
4214}
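
/*
 * Worked example (illustrative): with prec = 2 and ROUND_HALF_EVEN,
 * a = 1.5, b = 1.5, c = -2.2:
 *
 *   separate operations: round(a*b) = round(2.25) = 2.2, then 2.2 + c = 0
 *   mpd_qfma:            round(2.25 + c) = round(0.05) = 0.05
 *
 * The single final rounding preserves the low-order digits that the
 * intermediate rounding of the product would discard.
 */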
4215
Stefan Kraha3394bc2012-06-06 15:57:18 +02004216/*
4217 * Schedule the optimal precision increase for the Newton iteration.
4218 * v := input operand
4219 * z_0 := initial approximation
4220 * initprec := natural number such that abs(log(v) - z_0) < 10**-initprec
4221 * maxprec := target precision
4222 *
4223 * For convenience the output klist contains the elements in reverse order:
4224 * klist := [k_n-1, ..., k_0], where
4225 * 1) k_0 <= initprec and
4226 * 2) abs(log(v) - result) < 10**(-2*k_n-1 + 1) <= 10**-maxprec.
4227 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004228static inline int
4229ln_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2], mpd_ssize_t maxprec,
4230 mpd_ssize_t initprec)
4231{
4232 mpd_ssize_t k;
4233 int i;
4234
4235 assert(maxprec >= 2 && initprec >= 2);
4236 if (maxprec <= initprec) return -1;
4237
4238 i = 0; k = maxprec;
4239 do {
4240 k = (k+2) / 2;
4241 klist[i++] = k;
4242 } while (k > initprec);
4243
4244 return i-1;
4245}
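
/*
 * Worked example (illustrative): ln_schedule_prec(klist, 28, 2) fills
 * klist = {15, 8, 5, 3, 2} and returns 4. The callers walk the list
 * backwards, so the Newton steps run at k = 2, 3, 5, 8, 15 (working
 * precision 2*k+3), roughly doubling the number of correct digits in
 * each step.
 */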
4246
Stefan Kraha3394bc2012-06-06 15:57:18 +02004247/* The constants have been verified with both decimal.py and mpfr. */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004248#ifdef CONFIG_64
4249#if MPD_RDIGITS != 19
4250 #error "mpdecimal.c: MPD_RDIGITS must be 19."
4251#endif
4252static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
4253 6983716328982174407ULL, 9089704281976336583ULL, 1515961135648465461ULL,
4254 4416816335727555703ULL, 2900988039194170265ULL, 2307925037472986509ULL,
4255 107598438319191292ULL, 3466624107184669231ULL, 4450099781311469159ULL,
4256 9807828059751193854ULL, 7713456862091670584ULL, 1492198849978748873ULL,
4257 6528728696511086257ULL, 2385392051446341972ULL, 8692180205189339507ULL,
4258 6518769751037497088ULL, 2375253577097505395ULL, 9095610299291824318ULL,
4259 982748238504564801ULL, 5438635917781170543ULL, 7547331541421808427ULL,
4260 752371033310119785ULL, 3171643095059950878ULL, 9785265383207606726ULL,
4261 2932258279850258550ULL, 5497347726624257094ULL, 2976979522110718264ULL,
4262 9221477656763693866ULL, 1979650047149510504ULL, 6674183485704422507ULL,
4263 9702766860595249671ULL, 9278096762712757753ULL, 9314848524948644871ULL,
4264 6826928280848118428ULL, 754403708474699401ULL, 230105703089634572ULL,
4265 1929203337658714166ULL, 7589402567763113569ULL, 4208241314695689016ULL,
4266 2922455440575892572ULL, 9356734206705811364ULL, 2684916746550586856ULL,
4267 644507064800027750ULL, 9476834636167921018ULL, 5659121373450747856ULL,
4268 2835522011480466371ULL, 6470806855677432162ULL, 7141748003688084012ULL,
4269 9619404400222105101ULL, 5504893431493939147ULL, 6674744042432743651ULL,
4270 2287698219886746543ULL, 7773262884616336622ULL, 1985283935053089653ULL,
4271 4680843799894826233ULL, 8168948290720832555ULL, 8067566662873690987ULL,
4272 6248633409525465082ULL, 9829834196778404228ULL, 3524802359972050895ULL,
4273 3327900967572609677ULL, 110148862877297603ULL, 179914546843642076ULL,
4274 2302585092994045684ULL
4275};
4276#else
4277#if MPD_RDIGITS != 9
4278 #error "mpdecimal.c: MPD_RDIGITS must be 9."
4279#endif
4280static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
4281 401682692UL, 708474699UL, 720754403UL, 30896345UL, 602301057UL, 765871416UL,
4282 192920333UL, 763113569UL, 589402567UL, 956890167UL, 82413146UL, 589257242UL,
4283 245544057UL, 811364292UL, 734206705UL, 868569356UL, 167465505UL, 775026849UL,
4284 706480002UL, 18064450UL, 636167921UL, 569476834UL, 734507478UL, 156591213UL,
4285 148046637UL, 283552201UL, 677432162UL, 470806855UL, 880840126UL, 417480036UL,
4286 210510171UL, 940440022UL, 939147961UL, 893431493UL, 436515504UL, 440424327UL,
4287 654366747UL, 821988674UL, 622228769UL, 884616336UL, 537773262UL, 350530896UL,
4288 319852839UL, 989482623UL, 468084379UL, 720832555UL, 168948290UL, 736909878UL,
4289 675666628UL, 546508280UL, 863340952UL, 404228624UL, 834196778UL, 508959829UL,
4290 23599720UL, 967735248UL, 96757260UL, 603332790UL, 862877297UL, 760110148UL,
4291 468436420UL, 401799145UL, 299404568UL, 230258509UL
4292};
4293#endif
4294/* _mpd_ln10 is used directly for precisions smaller than MINALLOC_MAX*RDIGITS.
4295 Otherwise, it serves as the initial approximation for calculating ln(10). */
4296static const mpd_t _mpd_ln10 = {
4297 MPD_STATIC|MPD_CONST_DATA, -(MPD_MINALLOC_MAX*MPD_RDIGITS-1),
4298 MPD_MINALLOC_MAX*MPD_RDIGITS, MPD_MINALLOC_MAX, MPD_MINALLOC_MAX,
4299 (mpd_uint_t *)mpd_ln10_data
4300};
4301
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004302/*
4303 * Set 'result' to log(10).
4304 * Ulp error: abs(result - log(10)) < ulp(log(10))
4305 * Relative error : abs(result - log(10)) < 5 * 10**-prec * log(10)
4306 *
4307 * NOTE: The relative error is not derived from the ulp error, but
4308 * calculated separately using the fact that 23/10 < log(10) < 24/10.
4309 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004310void
4311mpd_qln10(mpd_t *result, mpd_ssize_t prec, uint32_t *status)
4312{
4313 mpd_context_t varcontext, maxcontext;
4314 MPD_NEW_STATIC(tmp, 0,0,0,0);
4315 MPD_NEW_CONST(static10, 0,0,2,1,1,10);
4316 mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
4317 mpd_uint_t rnd;
4318 mpd_ssize_t shift;
4319 int i;
4320
4321 assert(prec >= 1);
4322
4323 shift = MPD_MINALLOC_MAX*MPD_RDIGITS-prec;
4324 shift = shift < 0 ? 0 : shift;
4325
4326 rnd = mpd_qshiftr(result, &_mpd_ln10, shift, status);
4327 if (rnd == MPD_UINT_MAX) {
4328 mpd_seterror(result, MPD_Malloc_error, status);
4329 return;
4330 }
4331 result->exp = -(result->digits-1);
4332
4333 mpd_maxcontext(&maxcontext);
4334 if (prec < MPD_MINALLOC_MAX*MPD_RDIGITS) {
4335 maxcontext.prec = prec;
4336 _mpd_apply_round_excess(result, rnd, &maxcontext, status);
4337 *status |= (MPD_Inexact|MPD_Rounded);
4338 return;
4339 }
4340
4341 mpd_maxcontext(&varcontext);
4342 varcontext.round = MPD_ROUND_TRUNC;
4343
Stefan Kraha3394bc2012-06-06 15:57:18 +02004344 i = ln_schedule_prec(klist, prec+2, -result->exp);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004345 for (; i >= 0; i--) {
4346 varcontext.prec = 2*klist[i]+3;
4347 result->flags ^= MPD_NEG;
4348 _mpd_qexp(&tmp, result, &varcontext, status);
4349 result->flags ^= MPD_NEG;
4350 mpd_qmul(&tmp, &static10, &tmp, &varcontext, status);
4351 mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
4352 mpd_qadd(result, result, &tmp, &maxcontext, status);
4353 if (mpd_isspecial(result)) {
4354 break;
4355 }
4356 }
4357
4358 mpd_del(&tmp);
4359 maxcontext.prec = prec;
4360 mpd_qfinalize(result, &maxcontext, status);
4361}
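
/*
 * Usage sketch, not part of the library: obtaining log(10) directly.
 * mpd_to_sci() is assumed from the documented public API; per the error
 * bound above, the last printed digit may be off by one ulp.
 */
#if 0
static void ln10_example(void)
{
    MPD_NEW_STATIC(ln10, 0,0,0,0);
    uint32_t status = 0;
    char *s;

    mpd_qln10(&ln10, 30, &status);   /* log(10) = 2.30258509299404568401799145468... */
    s = mpd_to_sci(&ln10, 1);
    printf("%s\n", s);

    mpd_free(s);
    mpd_del(&ln10);
}
#endif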
4362
Stefan Kraha3394bc2012-06-06 15:57:18 +02004363/*
4364 * Initial approximations for the ln() iteration. The values have the
4365 * following properties (established with both decimal.py and mpfr):
4366 *
4367 * Index 0 - 400, logarithms of x in [1.00, 5.00]:
4368 * abs(lnapprox[i] * 10**-3 - log((i+100)/100)) < 10**-2
4369 * abs(lnapprox[i] * 10**-3 - log((i+1+100)/100)) < 10**-2
4370 *
4371 * Index 401 - 899, logarithms of x in (0.500, 0.999]:
4372 * abs(-lnapprox[i] * 10**-3 - log((i+100)/1000)) < 10**-2
4373 * abs(-lnapprox[i] * 10**-3 - log((i+1+100)/1000)) < 10**-2
4374 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004375static const uint16_t lnapprox[900] = {
4376 /* index 0 - 400: log((i+100)/100) * 1000 */
4377 0, 10, 20, 30, 39, 49, 58, 68, 77, 86, 95, 104, 113, 122, 131, 140, 148, 157,
4378 166, 174, 182, 191, 199, 207, 215, 223, 231, 239, 247, 255, 262, 270, 278,
4379 285, 293, 300, 308, 315, 322, 329, 336, 344, 351, 358, 365, 372, 378, 385,
4380 392, 399, 406, 412, 419, 425, 432, 438, 445, 451, 457, 464, 470, 476, 482,
4381 489, 495, 501, 507, 513, 519, 525, 531, 536, 542, 548, 554, 560, 565, 571,
4382 577, 582, 588, 593, 599, 604, 610, 615, 621, 626, 631, 637, 642, 647, 652,
4383 658, 663, 668, 673, 678, 683, 688, 693, 698, 703, 708, 713, 718, 723, 728,
4384 732, 737, 742, 747, 751, 756, 761, 766, 770, 775, 779, 784, 788, 793, 798,
4385 802, 806, 811, 815, 820, 824, 829, 833, 837, 842, 846, 850, 854, 859, 863,
4386 867, 871, 876, 880, 884, 888, 892, 896, 900, 904, 908, 912, 916, 920, 924,
4387 928, 932, 936, 940, 944, 948, 952, 956, 959, 963, 967, 971, 975, 978, 982,
4388 986, 990, 993, 997, 1001, 1004, 1008, 1012, 1015, 1019, 1022, 1026, 1030,
4389 1033, 1037, 1040, 1044, 1047, 1051, 1054, 1058, 1061, 1065, 1068, 1072, 1075,
4390 1078, 1082, 1085, 1089, 1092, 1095, 1099, 1102, 1105, 1109, 1112, 1115, 1118,
4391 1122, 1125, 1128, 1131, 1135, 1138, 1141, 1144, 1147, 1151, 1154, 1157, 1160,
4392 1163, 1166, 1169, 1172, 1176, 1179, 1182, 1185, 1188, 1191, 1194, 1197, 1200,
4393 1203, 1206, 1209, 1212, 1215, 1218, 1221, 1224, 1227, 1230, 1233, 1235, 1238,
4394 1241, 1244, 1247, 1250, 1253, 1256, 1258, 1261, 1264, 1267, 1270, 1273, 1275,
4395 1278, 1281, 1284, 1286, 1289, 1292, 1295, 1297, 1300, 1303, 1306, 1308, 1311,
4396 1314, 1316, 1319, 1322, 1324, 1327, 1330, 1332, 1335, 1338, 1340, 1343, 1345,
4397 1348, 1351, 1353, 1356, 1358, 1361, 1364, 1366, 1369, 1371, 1374, 1376, 1379,
4398 1381, 1384, 1386, 1389, 1391, 1394, 1396, 1399, 1401, 1404, 1406, 1409, 1411,
4399 1413, 1416, 1418, 1421, 1423, 1426, 1428, 1430, 1433, 1435, 1437, 1440, 1442,
4400 1445, 1447, 1449, 1452, 1454, 1456, 1459, 1461, 1463, 1466, 1468, 1470, 1472,
4401 1475, 1477, 1479, 1482, 1484, 1486, 1488, 1491, 1493, 1495, 1497, 1500, 1502,
4402 1504, 1506, 1509, 1511, 1513, 1515, 1517, 1520, 1522, 1524, 1526, 1528, 1530,
4403 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1548, 1550, 1552, 1554, 1556, 1558,
4404 1560, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585,
4405 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609,
4406 /* index 401 - 899: -log((i+100)/1000) * 1000 */
4407 691, 689, 687, 685, 683, 681, 679, 677, 675, 673, 671, 669, 668, 666, 664,
4408 662, 660, 658, 656, 654, 652, 650, 648, 646, 644, 642, 641, 639, 637, 635,
4409 633, 631, 629, 627, 626, 624, 622, 620, 618, 616, 614, 612, 611, 609, 607,
4410 605, 603, 602, 600, 598, 596, 594, 592, 591, 589, 587, 585, 583, 582, 580,
4411 578, 576, 574, 573, 571, 569, 567, 566, 564, 562, 560, 559, 557, 555, 553,
4412 552, 550, 548, 546, 545, 543, 541, 540, 538, 536, 534, 533, 531, 529, 528,
4413 526, 524, 523, 521, 519, 518, 516, 514, 512, 511, 509, 508, 506, 504, 502,
4414 501, 499, 498, 496, 494, 493, 491, 489, 488, 486, 484, 483, 481, 480, 478,
4415 476, 475, 473, 472, 470, 468, 467, 465, 464, 462, 460, 459, 457, 456, 454,
4416 453, 451, 449, 448, 446, 445, 443, 442, 440, 438, 437, 435, 434, 432, 431,
4417 429, 428, 426, 425, 423, 422, 420, 419, 417, 416, 414, 412, 411, 410, 408,
4418 406, 405, 404, 402, 400, 399, 398, 396, 394, 393, 392, 390, 389, 387, 386,
4419 384, 383, 381, 380, 378, 377, 375, 374, 372, 371, 370, 368, 367, 365, 364,
4420 362, 361, 360, 358, 357, 355, 354, 352, 351, 350, 348, 347, 345, 344, 342,
4421 341, 340, 338, 337, 336, 334, 333, 331, 330, 328, 327, 326, 324, 323, 322,
4422 320, 319, 318, 316, 315, 313, 312, 311, 309, 308, 306, 305, 304, 302, 301,
4423 300, 298, 297, 296, 294, 293, 292, 290, 289, 288, 286, 285, 284, 282, 281,
4424 280, 278, 277, 276, 274, 273, 272, 270, 269, 268, 267, 265, 264, 263, 261,
4425 260, 259, 258, 256, 255, 254, 252, 251, 250, 248, 247, 246, 245, 243, 242,
4426 241, 240, 238, 237, 236, 234, 233, 232, 231, 229, 228, 227, 226, 224, 223,
4427 222, 221, 219, 218, 217, 216, 214, 213, 212, 211, 210, 208, 207, 206, 205,
4428 203, 202, 201, 200, 198, 197, 196, 195, 194, 192, 191, 190, 189, 188, 186,
4429 185, 184, 183, 182, 180, 179, 178, 177, 176, 174, 173, 172, 171, 170, 168,
4430 167, 166, 165, 164, 162, 161, 160, 159, 158, 157, 156, 154, 153, 152, 151,
4431 150, 148, 147, 146, 145, 144, 143, 142, 140, 139, 138, 137, 136, 135, 134,
4432 132, 131, 130, 129, 128, 127, 126, 124, 123, 122, 121, 120, 119, 118, 116,
4433 115, 114, 113, 112, 111, 110, 109, 108, 106, 105, 104, 103, 102, 101, 100,
4434 99, 98, 97, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 84, 83, 82, 81, 80, 79,
4435 78, 77, 76, 75, 74, 73, 72, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59,
4436 58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39,
4437 38, 37, 36, 35, 34, 33, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
4438 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
4439};
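
/*
 * Worked example (illustrative): in _mpd_qln() below, the operand v = 2
 * yields the leading digits 200 after scaling, so x = 200 - 100 = 100 and
 * z = lnapprox[100] * 10**-3 = 0.693, which is within 10**-2 of
 * log(2) = 0.6931...
 */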
4440
Stefan Kraha3394bc2012-06-06 15:57:18 +02004441/*
4442 * Internal ln() function that does not check for specials, zero or one.
4443 * Relative error: abs(result - log(a)) < 0.1 * 10**-prec * abs(log(a))
4444 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004445static void
4446_mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4447 uint32_t *status)
4448{
4449 mpd_context_t varcontext, maxcontext;
4450 mpd_t *z = (mpd_t *) result;
4451 MPD_NEW_STATIC(v,0,0,0,0);
4452 MPD_NEW_STATIC(vtmp,0,0,0,0);
4453 MPD_NEW_STATIC(tmp,0,0,0,0);
4454 mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
4455 mpd_ssize_t maxprec, shift, t;
4456 mpd_ssize_t a_digits, a_exp;
4457 mpd_uint_t dummy, x;
4458 int i;
4459
4460 assert(!mpd_isspecial(a) && !mpd_iszerocoeff(a));
4461
4462 /*
4463 * We are calculating ln(a) = ln(v * 10^t) = ln(v) + t*ln(10),
4464 * where 0.5 < v <= 5.
4465 */
4466 if (!mpd_qcopy(&v, a, status)) {
4467 mpd_seterror(result, MPD_Malloc_error, status);
4468 goto finish;
4469 }
4470
4471 /* Initial approximation: we have at least one non-zero digit */
4472 _mpd_get_msdigits(&dummy, &x, &v, 3);
4473 if (x < 10) x *= 10;
4474 if (x < 100) x *= 10;
4475 x -= 100;
4476
4477 /* a may equal z */
4478 a_digits = a->digits;
4479 a_exp = a->exp;
4480
4481 mpd_minalloc(z);
4482 mpd_clear_flags(z);
4483 z->data[0] = lnapprox[x];
4484 z->len = 1;
4485 z->exp = -3;
4486 mpd_setdigits(z);
4487
4488 if (x <= 400) {
Stefan Kraha3394bc2012-06-06 15:57:18 +02004489 /* Reduce the input operand to 1.00 <= v <= 5.00. Let y = x + 100,
4490 * so 100 <= y <= 500. Since y contains the most significant digits
4491 * of v, y/100 <= v < (y+1)/100 and abs(z - log(v)) < 10**-2. */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004492 v.exp = -(a_digits - 1);
4493 t = a_exp + a_digits - 1;
4494 }
4495 else {
Stefan Kraha3394bc2012-06-06 15:57:18 +02004496 /* Reduce the input operand to 0.500 < v <= 0.999. Let y = x + 100,
4497 * so 500 < y <= 999. Since y contains the most significant digits
4498 * of v, y/1000 <= v < (y+1)/1000 and abs(z - log(v)) < 10**-2. */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004499 v.exp = -a_digits;
4500 t = a_exp + a_digits;
4501 mpd_set_negative(z);
4502 }
4503
4504 mpd_maxcontext(&maxcontext);
4505 mpd_maxcontext(&varcontext);
4506 varcontext.round = MPD_ROUND_TRUNC;
4507
4508 maxprec = ctx->prec + 2;
Stefan Kraha3394bc2012-06-06 15:57:18 +02004509 if (t == 0 && (x <= 15 || x >= 800)) {
4510 /* 0.900 <= v <= 1.15: Estimate the magnitude of the logarithm.
4511 * If ln(v) will underflow, skip the loop. Otherwise, adjust the
4512 * precision upwards in order to obtain a sufficient number of
4513 * significant digits.
Stefan Krah1919b7e2012-03-21 18:25:23 +01004514 *
Stefan Kraha3394bc2012-06-06 15:57:18 +02004515 * Case v > 1:
4516 * abs((v-1)/10) < abs((v-1)/v) < abs(ln(v)) < abs(v-1)
4517 * Case v < 1:
4518 * abs(v-1) < abs(ln(v)) < abs((v-1)/v) < abs((v-1)*10)
Stefan Krah1919b7e2012-03-21 18:25:23 +01004519 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004520 int cmp = _mpd_cmp(&v, &one);
4521
Stefan Kraha3394bc2012-06-06 15:57:18 +02004522 /* Upper bound (assume v > 1): abs(v-1), unrounded */
4523 _mpd_qsub(&tmp, &v, &one, &maxcontext, &maxcontext.status);
4524 if (maxcontext.status & MPD_Errors) {
4525 mpd_seterror(result, MPD_Malloc_error, status);
4526 goto finish;
4527 }
Stefan Krah1919b7e2012-03-21 18:25:23 +01004528
4529 if (cmp < 0) {
Stefan Kraha3394bc2012-06-06 15:57:18 +02004530 /* v < 1: abs((v-1)*10) */
4531 tmp.exp += 1;
Stefan Krah1919b7e2012-03-21 18:25:23 +01004532 }
Stefan Kraha3394bc2012-06-06 15:57:18 +02004533 if (mpd_adjexp(&tmp) < mpd_etiny(ctx)) {
4534 /* The upper bound is less than etiny: Underflow to zero */
4535 _settriple(result, (cmp<0), 1, mpd_etiny(ctx)-1);
4536 goto finish;
Stefan Krah1919b7e2012-03-21 18:25:23 +01004537 }
Stefan Kraha3394bc2012-06-06 15:57:18 +02004538 /* Lower bound: abs((v-1)/10) or abs(v-1) */
4539 tmp.exp -= 1;
4540 if (mpd_adjexp(&tmp) < 0) {
4541 /* Absolute error of the loop: abs(z - log(v)) < 10**-p. If
4542 * p = ctx->prec+2-adjexp(lower), then the relative error of
4543 * the result is (using 10**adjexp(x) <= abs(x)):
4544 *
4545 * abs(z - log(v)) / abs(log(v)) < 10**-p / abs(log(v))
4546 * <= 10**(-ctx->prec-2)
4547 */
4548 maxprec = maxprec - mpd_adjexp(&tmp);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004549 }
4550 }
4551
4552 i = ln_schedule_prec(klist, maxprec, 2);
4553 for (; i >= 0; i--) {
4554 varcontext.prec = 2*klist[i]+3;
4555 z->flags ^= MPD_NEG;
4556 _mpd_qexp(&tmp, z, &varcontext, status);
4557 z->flags ^= MPD_NEG;
4558
4559 if (v.digits > varcontext.prec) {
4560 shift = v.digits - varcontext.prec;
4561 mpd_qshiftr(&vtmp, &v, shift, status);
4562 vtmp.exp += shift;
4563 mpd_qmul(&tmp, &vtmp, &tmp, &varcontext, status);
4564 }
4565 else {
4566 mpd_qmul(&tmp, &v, &tmp, &varcontext, status);
4567 }
4568
4569 mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
4570 mpd_qadd(z, z, &tmp, &maxcontext, status);
4571 if (mpd_isspecial(z)) {
4572 break;
4573 }
4574 }
4575
Stefan Kraha3394bc2012-06-06 15:57:18 +02004576 /*
4577 * Case t == 0:
4578 * t * log(10) == 0, the result does not change and the analysis
4579 * above applies. If v < 0.900 or v > 1.15, the relative error is
4580 * less than 10**(-ctx.prec-1).
4581 * Case t != 0:
4582 * z := approx(log(v))
4583 * y := approx(log(10))
4584 * p := maxprec = ctx->prec + 2
4585 * Absolute errors:
4586 * 1) abs(z - log(v)) < 10**-p
4587 * 2) abs(y - log(10)) < 10**-p
4588 * The multiplication is exact, so:
4589 * 3) abs(t*y - t*log(10)) < t*10**-p
4590 * The sum is exact, so:
4591 * 4) abs((z + t*y) - (log(v) + t*log(10))) < (abs(t) + 1) * 10**-p
4592 * Bounds for log(v) and log(10):
4593 * 5) -7/10 < log(v) < 17/10
4594 * 6) 23/10 < log(10) < 24/10
4595 * Using 4), 5), 6) and t != 0, the relative error is:
4596 *
4597 * 7) relerr < ((abs(t) + 1)*10**-p) / abs(log(v) + t*log(10))
4598 * < 0.5 * 10**(-p + 1) = 0.5 * 10**(-ctx->prec-1)
4599 */
4600 mpd_qln10(&v, maxprec+1, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004601 mpd_qmul_ssize(&tmp, &v, t, &maxcontext, status);
Stefan Kraha3394bc2012-06-06 15:57:18 +02004602 mpd_qadd(result, &tmp, z, &maxcontext, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004603
4604
4605finish:
Stefan Kraha3394bc2012-06-06 15:57:18 +02004606 *status |= (MPD_Inexact|MPD_Rounded);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004607 mpd_del(&v);
4608 mpd_del(&vtmp);
4609 mpd_del(&tmp);
4610}
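
/*
 * Illustrative sketch, not part of the library: the loop above is Newton's
 * method for f(z) = exp(z) - v, i.e. z := z + v*exp(-z) - 1, which converges
 * quadratically to log(v). The disabled double-precision toy below shows the
 * same iteration; the operand and starting value are arbitrary examples.
 */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
    double v = 2.0;
    double z = 0.693;                  /* initial approximation, cf. lnapprox[] */
    int i;

    for (i = 0; i < 4; i++) {
        z = z + v * exp(-z) - 1.0;     /* one Newton step */
    }
    printf("%.17g\n", z);              /* ~ log(2) = 0.693147180559945... */
    return 0;
}
#endif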
4611
4612/* ln(a) */
4613void
4614mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4615 uint32_t *status)
4616{
4617 mpd_context_t workctx;
4618 mpd_ssize_t adjexp, t;
4619
4620 if (mpd_isspecial(a)) {
4621 if (mpd_qcheck_nan(result, a, ctx, status)) {
4622 return;
4623 }
4624 if (mpd_isnegative(a)) {
4625 mpd_seterror(result, MPD_Invalid_operation, status);
4626 return;
4627 }
4628 mpd_setspecial(result, MPD_POS, MPD_INF);
4629 return;
4630 }
4631 if (mpd_iszerocoeff(a)) {
4632 mpd_setspecial(result, MPD_NEG, MPD_INF);
4633 return;
4634 }
4635 if (mpd_isnegative(a)) {
4636 mpd_seterror(result, MPD_Invalid_operation, status);
4637 return;
4638 }
4639 if (_mpd_cmp(a, &one) == 0) {
4640 _settriple(result, MPD_POS, 0, 0);
4641 return;
4642 }
Stefan Krah7bda2652012-06-07 17:48:47 +02004643 /*
4644 * Check if the result will overflow (0 < x, x != 1):
4645 * 1) log10(x) < 0 iff adjexp(x) < 0
4646 * 2) 0 < x /\ x <= y ==> adjexp(x) <= adjexp(y)
4647 * 3) 0 < x /\ x != 1 ==> 2 * abs(log10(x)) < abs(log(x))
4648 * 4) adjexp(x) <= log10(x) < adjexp(x) + 1
Stefan Krah1919b7e2012-03-21 18:25:23 +01004649 *
Stefan Krah7bda2652012-06-07 17:48:47 +02004650 * Case adjexp(x) >= 0:
4651 * 5) 2 * adjexp(x) < abs(log(x))
4652 * Case adjexp(x) > 0:
4653 * 6) adjexp(2 * adjexp(x)) <= adjexp(abs(log(x)))
4654 * Case adjexp(x) == 0:
4655 * mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
Stefan Krah1919b7e2012-03-21 18:25:23 +01004656 *
Stefan Krah7bda2652012-06-07 17:48:47 +02004657 * Case adjexp(x) < 0:
4658 * 7) 2 * (-adjexp(x) - 1) < abs(log(x))
4659 * Case adjexp(x) < -1:
4660 * 8) adjexp(2 * (-adjexp(x) - 1)) <= adjexp(abs(log(x)))
4661 * Case adjexp(x) == -1:
4662 * mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
Stefan Krah1919b7e2012-03-21 18:25:23 +01004663 */
4664 adjexp = mpd_adjexp(a);
4665 t = (adjexp < 0) ? -adjexp-1 : adjexp;
4666 t *= 2;
4667 if (mpd_exp_digits(t)-1 > ctx->emax) {
4668 *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
4669 mpd_setspecial(result, (adjexp<0), MPD_INF);
4670 return;
4671 }
4672
4673 workctx = *ctx;
4674 workctx.round = MPD_ROUND_HALF_EVEN;
4675
4676 if (ctx->allcr) {
4677 MPD_NEW_STATIC(t1, 0,0,0,0);
4678 MPD_NEW_STATIC(t2, 0,0,0,0);
4679 MPD_NEW_STATIC(ulp, 0,0,0,0);
4680 MPD_NEW_STATIC(aa, 0,0,0,0);
4681 mpd_ssize_t prec;
4682
4683 if (result == a) {
4684 if (!mpd_qcopy(&aa, a, status)) {
4685 mpd_seterror(result, MPD_Malloc_error, status);
4686 return;
4687 }
4688 a = &aa;
4689 }
4690
4691 workctx.clamp = 0;
4692 prec = ctx->prec + 3;
4693 while (1) {
4694 workctx.prec = prec;
4695 _mpd_qln(result, a, &workctx, status);
4696 _ssettriple(&ulp, MPD_POS, 1,
Stefan Krah7bda2652012-06-07 17:48:47 +02004697 result->exp + result->digits-workctx.prec);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004698
4699 workctx.prec = ctx->prec;
4700 mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
4701 mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
4702 if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
4703 mpd_qcmp(&t1, &t2, status) == 0) {
4704 workctx.clamp = ctx->clamp;
4705 mpd_check_underflow(result, &workctx, status);
4706 mpd_qfinalize(result, &workctx, status);
4707 break;
4708 }
4709 prec += MPD_RDIGITS;
4710 }
4711 mpd_del(&t1);
4712 mpd_del(&t2);
4713 mpd_del(&ulp);
4714 mpd_del(&aa);
4715 }
4716 else {
4717 _mpd_qln(result, a, &workctx, status);
4718 mpd_check_underflow(result, &workctx, status);
4719 mpd_qfinalize(result, &workctx, status);
4720 }
4721}
4722
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004723/*
4724 * Internal log10() function that does not check for specials, zero or one.
4725 * Case SKIP_FINALIZE:
4726 * Relative error: abs(result - log10(a)) < 0.1 * 10**-prec * abs(log10(a))
4727 * Case DO_FINALIZE:
4728 * Ulp error: abs(result - log10(a)) < ulp(log10(a))
4729 */
4730enum {SKIP_FINALIZE, DO_FINALIZE};
Stefan Krah1919b7e2012-03-21 18:25:23 +01004731static void
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004732_mpd_qlog10(int action, mpd_t *result, const mpd_t *a,
4733 const mpd_context_t *ctx, uint32_t *status)
Stefan Krah1919b7e2012-03-21 18:25:23 +01004734{
4735 mpd_context_t workctx;
4736 MPD_NEW_STATIC(ln10,0,0,0,0);
4737
4738 mpd_maxcontext(&workctx);
4739 workctx.prec = ctx->prec + 3;
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004740 /* relative error: 0.1 * 10**(-p-3). The specific underflow shortcut
4741 * in _mpd_qln() does not change the final result. */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004742 _mpd_qln(result, a, &workctx, status);
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004743 /* relative error: 5 * 10**(-p-3) */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004744 mpd_qln10(&ln10, workctx.prec, status);
4745
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004746 if (action == DO_FINALIZE) {
4747 workctx = *ctx;
4748 workctx.round = MPD_ROUND_HALF_EVEN;
4749 }
4750 /* SKIP_FINALIZE: relative error: 5 * 10**(-p-3) */
Stefan Krah1919b7e2012-03-21 18:25:23 +01004751 _mpd_qdiv(NO_IDEAL_EXP, result, result, &ln10, &workctx, status);
4752
4753 mpd_del(&ln10);
4754}
4755
4756/* log10(a) */
4757void
4758mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
4759 uint32_t *status)
4760{
4761 mpd_context_t workctx;
4762 mpd_ssize_t adjexp, t;
4763
4764 workctx = *ctx;
4765 workctx.round = MPD_ROUND_HALF_EVEN;
4766
4767 if (mpd_isspecial(a)) {
4768 if (mpd_qcheck_nan(result, a, ctx, status)) {
4769 return;
4770 }
4771 if (mpd_isnegative(a)) {
4772 mpd_seterror(result, MPD_Invalid_operation, status);
4773 return;
4774 }
4775 mpd_setspecial(result, MPD_POS, MPD_INF);
4776 return;
4777 }
4778 if (mpd_iszerocoeff(a)) {
4779 mpd_setspecial(result, MPD_NEG, MPD_INF);
4780 return;
4781 }
4782 if (mpd_isnegative(a)) {
4783 mpd_seterror(result, MPD_Invalid_operation, status);
4784 return;
4785 }
4786 if (mpd_coeff_ispow10(a)) {
4787 uint8_t sign = 0;
4788 adjexp = mpd_adjexp(a);
4789 if (adjexp < 0) {
4790 sign = 1;
4791 adjexp = -adjexp;
4792 }
4793 _settriple(result, sign, adjexp, 0);
4794 mpd_qfinalize(result, &workctx, status);
4795 return;
4796 }
Stefan Krah5248a2d2012-06-09 00:01:28 +02004797 /*
4798 * Check if the result will overflow (0 < x, x != 1):
4799 * 1) log10(x) < 0 iff adjexp(x) < 0
4800 * 2) 0 < x /\ x <= y ==> adjexp(x) <= adjexp(y)
4801 * 3) adjexp(x) <= log10(x) < adjexp(x) + 1
Stefan Krah1919b7e2012-03-21 18:25:23 +01004802 *
Stefan Krah5248a2d2012-06-09 00:01:28 +02004803 * Case adjexp(x) >= 0:
4804 * 4) adjexp(x) <= abs(log10(x))
4805 * Case adjexp(x) > 0:
4806 * 5) adjexp(adjexp(x)) <= adjexp(abs(log10(x)))
4807 * Case adjexp(x) == 0:
4808 * mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
Stefan Krah1919b7e2012-03-21 18:25:23 +01004809 *
Stefan Krah5248a2d2012-06-09 00:01:28 +02004810 * Case adjexp(x) < 0:
4811 * 6) -adjexp(x) - 1 < abs(log10(x))
4812 * Case adjexp(x) < -1:
 4813 * 7) adjexp(-adjexp(x) - 1) <= adjexp(abs(log10(x)))
4814 * Case adjexp(x) == -1:
4815 * mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
Stefan Krah1919b7e2012-03-21 18:25:23 +01004816 */
4817 adjexp = mpd_adjexp(a);
4818 t = (adjexp < 0) ? -adjexp-1 : adjexp;
4819 if (mpd_exp_digits(t)-1 > ctx->emax) {
4820 *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
4821 mpd_setspecial(result, (adjexp<0), MPD_INF);
4822 return;
4823 }
4824
4825 if (ctx->allcr) {
4826 MPD_NEW_STATIC(t1, 0,0,0,0);
4827 MPD_NEW_STATIC(t2, 0,0,0,0);
4828 MPD_NEW_STATIC(ulp, 0,0,0,0);
4829 MPD_NEW_STATIC(aa, 0,0,0,0);
4830 mpd_ssize_t prec;
4831
4832 if (result == a) {
4833 if (!mpd_qcopy(&aa, a, status)) {
4834 mpd_seterror(result, MPD_Malloc_error, status);
4835 return;
4836 }
4837 a = &aa;
4838 }
4839
4840 workctx.clamp = 0;
4841 prec = ctx->prec + 3;
4842 while (1) {
4843 workctx.prec = prec;
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004844 _mpd_qlog10(SKIP_FINALIZE, result, a, &workctx, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004845 _ssettriple(&ulp, MPD_POS, 1,
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004846 result->exp + result->digits-workctx.prec);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004847
4848 workctx.prec = ctx->prec;
4849 mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
4850 mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
4851 if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
4852 mpd_qcmp(&t1, &t2, status) == 0) {
4853 workctx.clamp = ctx->clamp;
4854 mpd_check_underflow(result, &workctx, status);
4855 mpd_qfinalize(result, &workctx, status);
4856 break;
4857 }
4858 prec += MPD_RDIGITS;
4859 }
4860 mpd_del(&t1);
4861 mpd_del(&t2);
4862 mpd_del(&ulp);
4863 mpd_del(&aa);
4864 }
4865 else {
Stefan Krah1cf6dfc2012-06-08 18:41:33 +02004866 _mpd_qlog10(DO_FINALIZE, result, a, &workctx, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01004867 mpd_check_underflow(result, &workctx, status);
4868 }
4869}
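
/*
 * Usage sketch, not part of the library: powers of ten take the exact
 * mpd_coeff_ispow10() shortcut above, all other operands go through
 * _mpd_qlog10(). mpd_qset_string() is assumed from the documented public
 * API; error handling is omitted.
 */
#if 0
static void log10_example(const mpd_context_t *ctx)
{
    MPD_NEW_STATIC(x, 0,0,0,0);
    MPD_NEW_STATIC(r, 0,0,0,0);
    uint32_t status = 0;

    mpd_qset_string(&x, "1000", ctx, &status);
    mpd_qlog10(&r, &x, ctx, &status);      /* exactly 3 */

    mpd_qset_string(&x, "2", ctx, &status);
    mpd_qlog10(&r, &x, ctx, &status);      /* 0.30102999566... to ctx->prec digits */

    mpd_del(&x);
    mpd_del(&r);
}
#endif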
4870
4871/*
4872 * Maximum of the two operands. Attention: If one operand is a quiet NaN and the
4873 * other is numeric, the numeric operand is returned. This may not be what one
4874 * expects.
4875 */
4876void
4877mpd_qmax(mpd_t *result, const mpd_t *a, const mpd_t *b,
4878 const mpd_context_t *ctx, uint32_t *status)
4879{
4880 int c;
4881
4882 if (mpd_isqnan(a) && !mpd_isnan(b)) {
4883 mpd_qcopy(result, b, status);
4884 }
4885 else if (mpd_isqnan(b) && !mpd_isnan(a)) {
4886 mpd_qcopy(result, a, status);
4887 }
4888 else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
4889 return;
4890 }
4891 else {
4892 c = _mpd_cmp(a, b);
4893 if (c == 0) {
4894 c = _mpd_cmp_numequal(a, b);
4895 }
4896
4897 if (c < 0) {
4898 mpd_qcopy(result, b, status);
4899 }
4900 else {
4901 mpd_qcopy(result, a, status);
4902 }
4903 }
4904
4905 mpd_qfinalize(result, ctx, status);
4906}
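
/*
 * Example (illustrative): mpd_qmax(r, x, y, ...) with x a quiet NaN and
 * y = 2 sets r to 2, not NaN, following the specification's max operation.
 * Only if both operands are NaNs, or one of them is a signaling NaN, is a
 * NaN returned.
 */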
4907
4908/*
4909 * Maximum magnitude: Same as mpd_max(), but compares the operands with their
4910 * sign ignored.
4911 */
4912void
4913mpd_qmax_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
4914 const mpd_context_t *ctx, uint32_t *status)
4915{
4916 int c;
4917
4918 if (mpd_isqnan(a) && !mpd_isnan(b)) {
4919 mpd_qcopy(result, b, status);
4920 }
4921 else if (mpd_isqnan(b) && !mpd_isnan(a)) {
4922 mpd_qcopy(result, a, status);
4923 }
4924 else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
4925 return;
4926 }
4927 else {
4928 c = _mpd_cmp_abs(a, b);
4929 if (c == 0) {
4930 c = _mpd_cmp_numequal(a, b);
4931 }
4932
4933 if (c < 0) {
4934 mpd_qcopy(result, b, status);
4935 }
4936 else {
4937 mpd_qcopy(result, a, status);
4938 }
4939 }
4940
4941 mpd_qfinalize(result, ctx, status);
4942}
4943
4944/*
4945 * Minimum of the two operands. Attention: If one operand is a quiet NaN and the
4946 * other is numeric, the numeric operand is returned. This may not be what one
4947 * expects.
4948 */
4949void
4950mpd_qmin(mpd_t *result, const mpd_t *a, const mpd_t *b,
4951 const mpd_context_t *ctx, uint32_t *status)
4952{
4953 int c;
4954
4955 if (mpd_isqnan(a) && !mpd_isnan(b)) {
4956 mpd_qcopy(result, b, status);
4957 }
4958 else if (mpd_isqnan(b) && !mpd_isnan(a)) {
4959 mpd_qcopy(result, a, status);
4960 }
4961 else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
4962 return;
4963 }
4964 else {
4965 c = _mpd_cmp(a, b);
4966 if (c == 0) {
4967 c = _mpd_cmp_numequal(a, b);
4968 }
4969
4970 if (c < 0) {
4971 mpd_qcopy(result, a, status);
4972 }
4973 else {
4974 mpd_qcopy(result, b, status);
4975 }
4976 }
4977
4978 mpd_qfinalize(result, ctx, status);
4979}
4980
4981/*
4982 * Minimum magnitude: Same as mpd_min(), but compares the operands with their
4983 * sign ignored.
4984 */
4985void
4986mpd_qmin_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
4987 const mpd_context_t *ctx, uint32_t *status)
4988{
4989 int c;
4990
4991 if (mpd_isqnan(a) && !mpd_isnan(b)) {
4992 mpd_qcopy(result, b, status);
4993 }
4994 else if (mpd_isqnan(b) && !mpd_isnan(a)) {
4995 mpd_qcopy(result, a, status);
4996 }
4997 else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
4998 return;
4999 }
5000 else {
5001 c = _mpd_cmp_abs(a, b);
5002 if (c == 0) {
5003 c = _mpd_cmp_numequal(a, b);
5004 }
5005
5006 if (c < 0) {
5007 mpd_qcopy(result, a, status);
5008 }
5009 else {
5010 mpd_qcopy(result, b, status);
5011 }
5012 }
5013
5014 mpd_qfinalize(result, ctx, status);
5015}
5016
5017/* Minimum space needed for the result array in _karatsuba_rec(). */
5018static inline mpd_size_t
5019_kmul_resultsize(mpd_size_t la, mpd_size_t lb)
5020{
5021 mpd_size_t n, m;
5022
5023 n = add_size_t(la, lb);
5024 n = add_size_t(n, 1);
5025
5026 m = (la+1)/2 + 1;
5027 m = mul_size_t(m, 3);
5028
5029 return (m > n) ? m : n;
5030}
5031
5032/* Work space needed in _karatsuba_rec(). lim >= 4 */
5033static inline mpd_size_t
5034_kmul_worksize(mpd_size_t n, mpd_size_t lim)
5035{
5036 mpd_size_t m;
5037
5038 if (n <= lim) {
5039 return 0;
5040 }
5041
5042 m = (n+1)/2 + 1;
5043
5044 return add_size_t(mul_size_t(m, 2), _kmul_worksize(m, lim));
5045}
5046
5047
5048#define MPD_KARATSUBA_BASECASE 16 /* must be >= 4 */
5049
5050/*
5051 * Add the product of a and b to c.
5052 * c must be _kmul_resultsize(la, lb) in size.
 5053 * w is used as a work array and must be _kmul_worksize(la, lim) in size.
5054 * Roman E. Maeder, Storage Allocation for the Karatsuba Integer Multiplication
5055 * Algorithm. In "Design and implementation of symbolic computation systems",
5056 * Springer, 1993, ISBN 354057235X, 9783540572350.
5057 */
5058static void
5059_karatsuba_rec(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
5060 mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
5061{
5062 mpd_size_t m, lt;
5063
5064 assert(la >= lb && lb > 0);
5065 assert(la <= MPD_KARATSUBA_BASECASE || w != NULL);
5066
5067 if (la <= MPD_KARATSUBA_BASECASE) {
5068 _mpd_basemul(c, a, b, la, lb);
5069 return;
5070 }
5071
5072 m = (la+1)/2; // ceil(la/2)
5073
5074 /* lb <= m < la */
5075 if (lb <= m) {
5076
5077 /* lb can now be larger than la-m */
5078 if (lb > la-m) {
5079 lt = lb + lb + 1; // space needed for result array
5080 mpd_uint_zero(w, lt); // clear result array
5081 _karatsuba_rec(w, b, a+m, w+lt, lb, la-m); // b*ah
5082 }
5083 else {
5084 lt = (la-m) + (la-m) + 1; // space needed for result array
5085 mpd_uint_zero(w, lt); // clear result array
5086 _karatsuba_rec(w, a+m, b, w+lt, la-m, lb); // ah*b
5087 }
5088 _mpd_baseaddto(c+m, w, (la-m)+lb); // add ah*b*B**m
5089
5090 lt = m + m + 1; // space needed for the result array
5091 mpd_uint_zero(w, lt); // clear result array
5092 _karatsuba_rec(w, a, b, w+lt, m, lb); // al*b
5093 _mpd_baseaddto(c, w, m+lb); // add al*b
5094
5095 return;
5096 }
5097
5098 /* la >= lb > m */
5099 memcpy(w, a, m * sizeof *w);
5100 w[m] = 0;
5101 _mpd_baseaddto(w, a+m, la-m);
5102
5103 memcpy(w+(m+1), b, m * sizeof *w);
5104 w[m+1+m] = 0;
5105 _mpd_baseaddto(w+(m+1), b+m, lb-m);
5106
5107 _karatsuba_rec(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1);
5108
5109 lt = (la-m) + (la-m) + 1;
5110 mpd_uint_zero(w, lt);
5111
5112 _karatsuba_rec(w, a+m, b+m, w+lt, la-m, lb-m);
5113
5114 _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
5115 _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));
5116
5117 lt = m + m + 1;
5118 mpd_uint_zero(w, lt);
5119
5120 _karatsuba_rec(w, a, b, w+lt, m, m);
5121 _mpd_baseaddto(c, w, m+m);
5122 _mpd_basesubfrom(c+m, w, m+m);
5123
5124 return;
5125}
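
/*
 * Illustrative sketch, not part of the library: one level of the same
 * recurrence with 64-bit integers and split base B = 10**4. The library
 * operates on arrays of mpd_uint_t in base MPD_RADIX and recurses down to
 * the basecase, but the identity is the same:
 *
 *   a*b = ah*bh*B**2 + ((ah+al)*(bh+bl) - ah*bh - al*bl)*B + al*bl
 */
#if 0
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    const uint64_t B = 10000;
    uint64_t a = 12345678, b = 87654321;

    uint64_t ah = a / B, al = a % B;                 /* a = ah*B + al */
    uint64_t bh = b / B, bl = b % B;                 /* b = bh*B + bl */

    uint64_t hi  = ah * bh;
    uint64_t lo  = al * bl;
    uint64_t mid = (ah + al) * (bh + bl) - hi - lo;  /* three mults instead of four */

    uint64_t prod = hi * B * B + mid * B + lo;

    printf("%" PRIu64 " == %" PRIu64 "\n", prod, a * b);  /* both 1082152022374638 */
    return 0;
}
#endif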
5126
5127/*
5128 * Multiply u and v, using Karatsuba multiplication. Returns a pointer
5129 * to the result or NULL in case of failure (malloc error).
5130 * Conditions: ulen >= vlen, ulen >= 4
5131 */
5132mpd_uint_t *
5133_mpd_kmul(const mpd_uint_t *u, const mpd_uint_t *v,
5134 mpd_size_t ulen, mpd_size_t vlen,
5135 mpd_size_t *rsize)
5136{
5137 mpd_uint_t *result = NULL, *w = NULL;
5138 mpd_size_t m;
5139
5140 assert(ulen >= 4);
5141 assert(ulen >= vlen);
5142
5143 *rsize = _kmul_resultsize(ulen, vlen);
5144 if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
5145 return NULL;
5146 }
5147
5148 m = _kmul_worksize(ulen, MPD_KARATSUBA_BASECASE);
5149 if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
5150 mpd_free(result);
5151 return NULL;
5152 }
5153
5154 _karatsuba_rec(result, u, v, w, ulen, vlen);
5155
5156
5157 if (w) mpd_free(w);
5158 return result;
5159}
5160
5161
Stefan Krah92538622012-06-10 16:50:55 +02005162/*
5163 * Determine the minimum length for the number theoretic transform. Valid
5164 * transform lengths are 2**n or 3*2**n, where 2**n <= MPD_MAXTRANSFORM_2N.
5165 * The function finds the shortest length m such that rsize <= m.
5166 */
Stefan Krah1919b7e2012-03-21 18:25:23 +01005167static inline mpd_size_t
5168_mpd_get_transform_len(mpd_size_t rsize)
5169{
5170 mpd_size_t log2rsize;
5171 mpd_size_t x, step;
5172
5173 assert(rsize >= 4);
5174 log2rsize = mpd_bsr(rsize);
5175
5176 if (rsize <= 1024) {
Stefan Krah92538622012-06-10 16:50:55 +02005177 /* 2**n is faster in this range. */
Stefan Krah1919b7e2012-03-21 18:25:23 +01005178 x = ((mpd_size_t)1)<<log2rsize;
5179 return (rsize == x) ? x : x<<1;
5180 }
5181 else if (rsize <= MPD_MAXTRANSFORM_2N) {
5182 x = ((mpd_size_t)1)<<log2rsize;
5183 if (rsize == x) return x;
5184 step = x>>1;
5185 x += step;
5186 return (rsize <= x) ? x : x + step;
5187 }
5188 else if (rsize <= MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2) {
5189 return MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2;
5190 }
5191 else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
5192 return 3*MPD_MAXTRANSFORM_2N;
5193 }
5194 else {
5195 return MPD_SIZE_MAX;
5196 }
5197}
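
/*
 * Worked example (illustrative): for rsize = 1500 (assuming
 * 1500 <= MPD_MAXTRANSFORM_2N), the middle branch applies:
 * 2**bsr(1500) = 1024, the candidate lengths are 1024, 1536 and 2048,
 * and since 1024 < 1500 <= 1536 the function returns 1536 = 3*2**9.
 */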
5198
5199#ifdef PPRO
5200#ifndef _MSC_VER
5201static inline unsigned short
5202_mpd_get_control87(void)
5203{
5204 unsigned short cw;
5205
5206 __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
5207 return cw;
5208}
5209
5210static inline void
5211_mpd_set_control87(unsigned short cw)
5212{
5213 __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
5214}
5215#endif
5216
5217unsigned int
5218mpd_set_fenv(void)
5219{
5220 unsigned int cw;
5221#ifdef _MSC_VER
5222 unsigned int flags =
5223 _EM_INVALID|_EM_DENORMAL|_EM_ZERODIVIDE|_EM_OVERFLOW|
5224 _EM_UNDERFLOW|_EM_INEXACT|_RC_CHOP|_PC_64;
5225 unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
5226 unsigned int dummy;
5227
5228 __control87_2(0, 0, &cw, NULL);
5229 __control87_2(flags, mask, &dummy, NULL);
5230#else
5231 cw = _mpd_get_control87();
5232 _mpd_set_control87(cw|0xF3F);
5233#endif
5234 return cw;
5235}
5236
5237void
5238mpd_restore_fenv(unsigned int cw)
5239{
5240#ifdef _MSC_VER
5241 unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
5242 unsigned int dummy;
5243
5244 __control87_2(cw, mask, &dummy, NULL);
5245#else
5246 _mpd_set_control87((unsigned short)cw);
5247#endif
5248}
5249#endif /* PPRO */
5250
5251/*
5252 * Multiply u and v, using the fast number theoretic transform. Returns
5253 * a pointer to the result or NULL in case of failure (malloc error).
5254 */
5255mpd_uint_t *
5256_mpd_fntmul(const mpd_uint_t *u, const mpd_uint_t *v,
5257 mpd_size_t ulen, mpd_size_t vlen,
5258 mpd_size_t *rsize)
5259{
5260 mpd_uint_t *c1 = NULL, *c2 = NULL, *c3 = NULL, *vtmp = NULL;
5261 mpd_size_t n;
5262
5263#ifdef PPRO
5264 unsigned int cw;
5265 cw = mpd_set_fenv();
5266#endif
5267
5268 *rsize = add_size_t(ulen, vlen);
5269 if ((n = _mpd_get_transform_len(*rsize)) == MPD_SIZE_MAX) {
5270 goto malloc_error;
5271 }
5272
Stefan Krah92538622012-06-10 16:50:55 +02005273 if ((c1 = mpd_calloc(n, sizeof *c1)) == NULL) {
Stefan Krah1919b7e2012-03-21 18:25:23 +01005274 goto malloc_error;
5275 }
Stefan Krah92538622012-06-10 16:50:55 +02005276 if ((c2 = mpd_calloc(n, sizeof *c2)) == NULL) {
Stefan Krah1919b7e2012-03-21 18:25:23 +01005277 goto malloc_error;
5278 }
Stefan Krah92538622012-06-10 16:50:55 +02005279 if ((c3 = mpd_calloc(n, sizeof *c3)) == NULL) {
Stefan Krah1919b7e2012-03-21 18:25:23 +01005280 goto malloc_error;
5281 }
5282
5283 memcpy(c1, u, ulen * (sizeof *c1));
5284 memcpy(c2, u, ulen * (sizeof *c2));
5285 memcpy(c3, u, ulen * (sizeof *c3));
5286
5287 if (u == v) {
5288 if (!fnt_autoconvolute(c1, n, P1) ||
5289 !fnt_autoconvolute(c2, n, P2) ||
5290 !fnt_autoconvolute(c3, n, P3)) {
5291 goto malloc_error;
5292 }
5293 }
5294 else {
Stefan Krah92538622012-06-10 16:50:55 +02005295 if ((vtmp = mpd_calloc(n, sizeof *vtmp)) == NULL) {
Stefan Krah1919b7e2012-03-21 18:25:23 +01005296 goto malloc_error;
5297 }
5298
5299 memcpy(vtmp, v, vlen * (sizeof *vtmp));
5300 if (!fnt_convolute(c1, vtmp, n, P1)) {
5301 mpd_free(vtmp);
5302 goto malloc_error;
5303 }
5304
5305 memcpy(vtmp, v, vlen * (sizeof *vtmp));
5306 mpd_uint_zero(vtmp+vlen, n-vlen);
5307 if (!fnt_convolute(c2, vtmp, n, P2)) {
5308 mpd_free(vtmp);
5309 goto malloc_error;
5310 }
5311
5312 memcpy(vtmp, v, vlen * (sizeof *vtmp));
5313 mpd_uint_zero(vtmp+vlen, n-vlen);
5314 if (!fnt_convolute(c3, vtmp, n, P3)) {
5315 mpd_free(vtmp);
5316 goto malloc_error;
5317 }
5318
5319 mpd_free(vtmp);
5320 }
5321
5322 crt3(c1, c2, c3, *rsize);
5323
5324out:
5325#ifdef PPRO
5326 mpd_restore_fenv(cw);
5327#endif
5328 if (c2) mpd_free(c2);
5329 if (c3) mpd_free(c3);
5330 return c1;
5331
5332malloc_error:
5333 if (c1) mpd_free(c1);
5334 c1 = NULL;
5335 goto out;
5336}
5337
5338
5339/*
5340 * Karatsuba multiplication with FNT/basemul as the base case.
5341 */
5342static int
5343_karatsuba_rec_fnt(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
5344 mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
5345{
5346 mpd_size_t m, lt;
5347
5348 assert(la >= lb && lb > 0);
5349 assert(la <= 3*(MPD_MAXTRANSFORM_2N/2) || w != NULL);
5350
5351 if (la <= 3*(MPD_MAXTRANSFORM_2N/2)) {
5352
5353 if (lb <= 192) {
5354 _mpd_basemul(c, b, a, lb, la);
5355 }
5356 else {
5357 mpd_uint_t *result;
5358 mpd_size_t dummy;
5359
5360 if ((result = _mpd_fntmul(a, b, la, lb, &dummy)) == NULL) {
5361 return 0;
5362 }
5363 memcpy(c, result, (la+lb) * (sizeof *result));
5364 mpd_free(result);
5365 }
5366 return 1;
5367 }
5368
5369 m = (la+1)/2; // ceil(la/2)
5370
5371 /* lb <= m < la */
5372 if (lb <= m) {
5373
5374 /* lb can now be larger than la-m */
5375 if (lb > la-m) {
5376 lt = lb + lb + 1; // space needed for result array
5377 mpd_uint_zero(w, lt); // clear result array
5378 if (!_karatsuba_rec_fnt(w, b, a+m, w+lt, lb, la-m)) { // b*ah
5379 return 0; /* GCOV_UNLIKELY */
5380 }
5381 }
5382 else {
5383 lt = (la-m) + (la-m) + 1; // space needed for result array
5384 mpd_uint_zero(w, lt); // clear result array
5385 if (!_karatsuba_rec_fnt(w, a+m, b, w+lt, la-m, lb)) { // ah*b
5386 return 0; /* GCOV_UNLIKELY */
5387 }
5388 }
5389 _mpd_baseaddto(c+m, w, (la-m)+lb); // add ah*b*B**m
5390
5391 lt = m + m + 1; // space needed for the result array
5392 mpd_uint_zero(w, lt); // clear result array
5393 if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, lb)) { // al*b
5394 return 0; /* GCOV_UNLIKELY */
5395 }
5396 _mpd_baseaddto(c, w, m+lb); // add al*b
5397
5398 return 1;
5399 }
5400
5401 /* la >= lb > m */
5402 memcpy(w, a, m * sizeof *w);
5403 w[m] = 0;
5404 _mpd_baseaddto(w, a+m, la-m);
5405
5406 memcpy(w+(m+1), b, m * sizeof *w);
5407 w[m+1+m] = 0;
5408 _mpd_baseaddto(w+(m+1), b+m, lb-m);
5409
5410 if (!_karatsuba_rec_fnt(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1)) {
5411 return 0; /* GCOV_UNLIKELY */
5412 }
5413
5414 lt = (la-m) + (la-m) + 1;
5415 mpd_uint_zero(w, lt);
5416
5417 if (!_karatsuba_rec_fnt(w, a+m, b+m, w+lt, la-m, lb-m)) {
5418 return 0; /* GCOV_UNLIKELY */
5419 }
5420
5421 _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
5422 _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));
5423
5424 lt = m + m + 1;
5425 mpd_uint_zero(w, lt);
5426
5427 if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, m)) {
5428 return 0; /* GCOV_UNLIKELY */
5429 }
5430 _mpd_baseaddto(c, w, m+m);
5431 _mpd_basesubfrom(c+m, w, m+m);
5432
5433 return 1;
5434}
5435
5436/*
5437 * Multiply u and v, using Karatsuba multiplication with the FNT as the
5438 * base case. Returns a pointer to the result or NULL in case of failure
5439 * (malloc error). Conditions: ulen >= vlen, ulen >= 4.
5440 */
5441mpd_uint_t *
5442_mpd_kmul_fnt(const mpd_uint_t *u, const mpd_uint_t *v,
5443 mpd_size_t ulen, mpd_size_t vlen,
5444 mpd_size_t *rsize)
5445{
5446 mpd_uint_t *result = NULL, *w = NULL;
5447 mpd_size_t m;
5448
5449 assert(ulen >= 4);
5450 assert(ulen >= vlen);
5451
5452 *rsize = _kmul_resultsize(ulen, vlen);
5453 if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
5454 return NULL;
5455 }
5456
5457 m = _kmul_worksize(ulen, 3*(MPD_MAXTRANSFORM_2N/2));
5458 if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
5459 mpd_free(result); /* GCOV_UNLIKELY */
5460 return NULL; /* GCOV_UNLIKELY */
5461 }
5462
5463 if (!_karatsuba_rec_fnt(result, u, v, w, ulen, vlen)) {
5464 mpd_free(result);
5465 result = NULL;
5466 }
5467
5468
5469 if (w) mpd_free(w);
5470 return result;
5471}
5472
5473
5474/* Deal with the special cases of multiplying infinities. */
5475static void
5476_mpd_qmul_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
5477{
5478 if (mpd_isinfinite(a)) {
5479 if (mpd_iszero(b)) {
5480 mpd_seterror(result, MPD_Invalid_operation, status);
5481 }
5482 else {
5483 mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
5484 }
5485 return;
5486 }
5487 assert(mpd_isinfinite(b));
5488 if (mpd_iszero(a)) {
5489 mpd_seterror(result, MPD_Invalid_operation, status);
5490 }
5491 else {
5492 mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
5493 }
5494}
5495
5496/*
5497 * Internal function: Multiply a and b. _mpd_qmul deals with specials but
5498 * does NOT finalize the result. This is for use in mpd_fma().
5499 */
5500static inline void
5501_mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
5502 const mpd_context_t *ctx, uint32_t *status)
5503{
5504 mpd_t *big = (mpd_t *)a, *small = (mpd_t *)b;
5505 mpd_uint_t *rdata = NULL;
5506 mpd_uint_t rbuf[MPD_MINALLOC_MAX];
5507 mpd_size_t rsize, i;
5508
5509
5510 if (mpd_isspecial(a) || mpd_isspecial(b)) {
5511 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
5512 return;
5513 }
5514 _mpd_qmul_inf(result, a, b, status);
5515 return;
5516 }
5517
5518 if (small->len > big->len) {
5519 _mpd_ptrswap(&big, &small);
5520 }
5521
5522 rsize = big->len + small->len;
5523
5524 if (big->len == 1) {
5525 _mpd_singlemul(result->data, big->data[0], small->data[0]);
5526 goto finish;
5527 }
5528 if (rsize <= (mpd_size_t)MPD_MINALLOC_MAX) {
5529 if (big->len == 2) {
5530 _mpd_mul_2_le2(rbuf, big->data, small->data, small->len);
5531 }
5532 else {
5533 mpd_uint_zero(rbuf, rsize);
5534 if (small->len == 1) {
5535 _mpd_shortmul(rbuf, big->data, big->len, small->data[0]);
5536 }
5537 else {
5538 _mpd_basemul(rbuf, small->data, big->data, small->len, big->len);
5539 }
5540 }
5541 if (!mpd_qresize(result, rsize, status)) {
5542 return;
5543 }
5544 for(i = 0; i < rsize; i++) {
5545 result->data[i] = rbuf[i];
5546 }
5547 goto finish;
5548 }
5549
5550
5551 if (small->len == 1) {
Stefan Krahafc0c772012-06-09 15:28:36 +02005552 rdata = mpd_calloc(rsize, sizeof *rdata);
5553 if (rdata != NULL) {
5554 _mpd_shortmul(rdata, big->data, big->len, small->data[0]);
Stefan Krah1919b7e2012-03-21 18:25:23 +01005555 }
Stefan Krah1919b7e2012-03-21 18:25:23 +01005556 }
5557 else if (rsize <= 1024) {
5558 rdata = _mpd_kmul(big->data, small->data, big->len, small->len, &rsize);
Stefan Krah1919b7e2012-03-21 18:25:23 +01005559 }
5560 else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
5561 rdata = _mpd_fntmul(big->data, small->data, big->len, small->len, &rsize);
Stefan Krah1919b7e2012-03-21 18:25:23 +01005562 }
5563 else {
5564 rdata = _mpd_kmul_fnt(big->data, small->data, big->len, small->len, &rsize);
Stefan Krahafc0c772012-06-09 15:28:36 +02005565 }
5566
5567 if (rdata == NULL) {
5568 mpd_seterror(result, MPD_Malloc_error, status);
5569 return;
Stefan Krah1919b7e2012-03-21 18:25:23 +01005570 }
5571
5572 if (mpd_isdynamic_data(result)) {
5573 mpd_free(result->data);
5574 }
5575 result->data = rdata;
5576 result->alloc = rsize;
5577 mpd_set_dynamic_data(result);
5578
5579
5580finish:
5581 mpd_set_flags(result, mpd_sign(a)^mpd_sign(b));
5582 result->exp = big->exp + small->exp;
5583 result->len = _mpd_real_size(result->data, rsize);
5584 /* resize to smaller cannot fail */
5585 mpd_qresize(result, result->len, status);
5586 mpd_setdigits(result);
5587}
5588
5589/* Multiply a and b. */
5590void
5591mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
5592 const mpd_context_t *ctx, uint32_t *status)
5593{
5594 _mpd_qmul(result, a, b, ctx, status);
5595 mpd_qfinalize(result, ctx, status);
5596}
5597
Stefan Krah3c23a872012-04-20 19:59:20 +02005598/* Multiply a and b. Set NaN/Invalid_operation if the result is inexact. */
5599static void
5600_mpd_qmul_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
5601 const mpd_context_t *ctx, uint32_t *status)
5602{
5603 uint32_t workstatus = 0;
5604
5605 mpd_qmul(result, a, b, ctx, &workstatus);
5606 *status |= workstatus;
5607 if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
5608 mpd_seterror(result, MPD_Invalid_operation, status);
5609 }
5610}
5611
Stefan Krah1919b7e2012-03-21 18:25:23 +01005612/* Multiply decimal and mpd_ssize_t. */
5613void
5614mpd_qmul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
5615 const mpd_context_t *ctx, uint32_t *status)
5616{
5617 mpd_context_t maxcontext;
5618 MPD_NEW_STATIC(bb,0,0,0,0);
5619
5620 mpd_maxcontext(&maxcontext);
5621 mpd_qsset_ssize(&bb, b, &maxcontext, status);
5622 mpd_qmul(result, a, &bb, ctx, status);
5623 mpd_del(&bb);
5624}
5625
5626/* Multiply decimal and mpd_uint_t. */
5627void
5628mpd_qmul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
5629 const mpd_context_t *ctx, uint32_t *status)
5630{
5631 mpd_context_t maxcontext;
5632 MPD_NEW_STATIC(bb,0,0,0,0);
5633
5634 mpd_maxcontext(&maxcontext);
5635 mpd_qsset_uint(&bb, b, &maxcontext, status);
5636 mpd_qmul(result, a, &bb, ctx, status);
5637 mpd_del(&bb);
5638}
5639
5640void
5641mpd_qmul_i32(mpd_t *result, const mpd_t *a, int32_t b,
5642 const mpd_context_t *ctx, uint32_t *status)
5643{
5644 mpd_qmul_ssize(result, a, b, ctx, status);
5645}
5646
5647void
5648mpd_qmul_u32(mpd_t *result, const mpd_t *a, uint32_t b,
5649 const mpd_context_t *ctx, uint32_t *status)
5650{
5651 mpd_qmul_uint(result, a, b, ctx, status);
5652}
5653
5654#ifdef CONFIG_64
5655void
5656mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
5657 const mpd_context_t *ctx, uint32_t *status)
5658{
5659 mpd_qmul_ssize(result, a, b, ctx, status);
5660}
5661
5662void
5663mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
5664 const mpd_context_t *ctx, uint32_t *status)
5665{
5666 mpd_qmul_uint(result, a, b, ctx, status);
5667}
5668#endif
5669
5670/* Like the minus operator. */
5671void
5672mpd_qminus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5673 uint32_t *status)
5674{
5675 if (mpd_isspecial(a)) {
5676 if (mpd_qcheck_nan(result, a, ctx, status)) {
5677 return;
5678 }
5679 }
5680
5681 if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
5682 mpd_qcopy_abs(result, a, status);
5683 }
5684 else {
5685 mpd_qcopy_negate(result, a, status);
5686 }
5687
5688 mpd_qfinalize(result, ctx, status);
5689}
5690
5691/* Like the plus operator. */
5692void
5693mpd_qplus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5694 uint32_t *status)
5695{
5696 if (mpd_isspecial(a)) {
5697 if (mpd_qcheck_nan(result, a, ctx, status)) {
5698 return;
5699 }
5700 }
5701
5702 if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
5703 mpd_qcopy_abs(result, a, status);
5704 }
5705 else {
5706 mpd_qcopy(result, a, status);
5707 }
5708
5709 mpd_qfinalize(result, ctx, status);
5710}
5711
5712/* The largest representable number that is smaller than the operand. */
5713void
5714mpd_qnext_minus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5715 uint32_t *status)
5716{
Stefan Krah88e19772012-06-11 08:57:17 +02005717 mpd_context_t workctx;
Stefan Krah1919b7e2012-03-21 18:25:23 +01005718 MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);
5719
5720 if (mpd_isspecial(a)) {
5721 if (mpd_qcheck_nan(result, a, ctx, status)) {
5722 return;
5723 }
Stefan Krah88e19772012-06-11 08:57:17 +02005724
5725 assert(mpd_isinfinite(a));
5726 if (mpd_isnegative(a)) {
5727 mpd_qcopy(result, a, status);
5728 return;
Stefan Krah1919b7e2012-03-21 18:25:23 +01005729 }
Stefan Krah88e19772012-06-11 08:57:17 +02005730 else {
5731 mpd_clear_flags(result);
5732 mpd_qmaxcoeff(result, ctx, status);
5733 if (mpd_isnan(result)) {
5734 return;
5735 }
5736 result->exp = mpd_etop(ctx);
5737 return;
5738 }
Stefan Krah1919b7e2012-03-21 18:25:23 +01005739 }
5740
5741 mpd_workcontext(&workctx, ctx);
5742 workctx.round = MPD_ROUND_FLOOR;
5743
5744 if (!mpd_qcopy(result, a, status)) {
5745 return;
5746 }
5747
5748 mpd_qfinalize(result, &workctx, &workctx.status);
5749 if (workctx.status&(MPD_Inexact|MPD_Errors)) {
5750 *status |= (workctx.status&MPD_Errors);
5751 return;
5752 }
5753
5754 workctx.status = 0;
5755 mpd_qsub(result, a, &tiny, &workctx, &workctx.status);
5756 *status |= (workctx.status&MPD_Errors);
5757}
5758
5759/* The smallest representable number that is larger than the operand. */
5760void
5761mpd_qnext_plus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
5762 uint32_t *status)
5763{
5764 mpd_context_t workctx;
5765 MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);
5766
5767 if (mpd_isspecial(a)) {
5768 if (mpd_qcheck_nan(result, a, ctx, status)) {
5769 return;
5770 }
Stefan Krah88e19772012-06-11 08:57:17 +02005771
5772 assert(mpd_isinfinite(a));
5773 if (mpd_ispositive(a)) {
5774 mpd_qcopy(result, a, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01005775 }
Stefan Krah88e19772012-06-11 08:57:17 +02005776 else {
5777 mpd_clear_flags(result);
5778 mpd_qmaxcoeff(result, ctx, status);
5779 if (mpd_isnan(result)) {
5780 return;
5781 }
5782 mpd_set_flags(result, MPD_NEG);
5783 result->exp = mpd_etop(ctx);
5784 }
5785 return;
Stefan Krah1919b7e2012-03-21 18:25:23 +01005786 }
5787
5788 mpd_workcontext(&workctx, ctx);
5789 workctx.round = MPD_ROUND_CEILING;
5790
5791 if (!mpd_qcopy(result, a, status)) {
5792 return;
5793 }
5794
5795 mpd_qfinalize(result, &workctx, &workctx.status);
5796 if (workctx.status & (MPD_Inexact|MPD_Errors)) {
5797 *status |= (workctx.status&MPD_Errors);
5798 return;
5799 }
5800
5801 workctx.status = 0;
5802 mpd_qadd(result, a, &tiny, &workctx, &workctx.status);
5803 *status |= (workctx.status&MPD_Errors);
5804}
5805
5806/*
5807 * The number closest to the first operand that is in the direction towards
5808 * the second operand.
5809 */
5810void
5811mpd_qnext_toward(mpd_t *result, const mpd_t *a, const mpd_t *b,
5812 const mpd_context_t *ctx, uint32_t *status)
5813{
5814 int c;
5815
Stefan Krah88e19772012-06-11 08:57:17 +02005816 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
5817 return;
Stefan Krah1919b7e2012-03-21 18:25:23 +01005818 }
5819
5820 c = _mpd_cmp(a, b);
5821 if (c == 0) {
5822 mpd_qcopy_sign(result, a, b, status);
5823 return;
5824 }
5825
5826 if (c < 0) {
5827 mpd_qnext_plus(result, a, ctx, status);
5828 }
5829 else {
5830 mpd_qnext_minus(result, a, ctx, status);
5831 }
5832
5833 if (mpd_isinfinite(result)) {
5834 *status |= (MPD_Overflow|MPD_Rounded|MPD_Inexact);
5835 }
5836 else if (mpd_adjexp(result) < ctx->emin) {
5837 *status |= (MPD_Underflow|MPD_Subnormal|MPD_Rounded|MPD_Inexact);
5838 if (mpd_iszero(result)) {
5839 *status |= MPD_Clamped;
5840 }
5841 }
5842}
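
/*
 * Example (not part of the implementation): a minimal usage sketch for the
 * next-plus/next-minus/next-toward family through the public API declared
 * in mpdecimal.h. With a 9-digit context, next-plus(1) should give
 * 1.00000001 and next-minus(1) should give 0.999999999.
 *
 *     mpd_context_t ctx;
 *     uint32_t status = 0;
 *     mpd_t *x = mpd_qnew(), *y = mpd_qnew(), *r = mpd_qnew();
 *
 *     mpd_defaultcontext(&ctx);
 *     ctx.prec = 9;
 *     mpd_qset_string(x, "1", &ctx, &status);
 *     mpd_qset_string(y, "2", &ctx, &status);
 *     mpd_qnext_plus(r, x, &ctx, &status);      // 1.00000001
 *     mpd_qnext_toward(r, x, y, &ctx, &status); // neighbor of x towards y
 *
 *     mpd_del(x); mpd_del(y); mpd_del(r);
 */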
5843
5844/*
5845 * Internal function: Integer power with mpd_uint_t exponent. The function
5846 * can fail with MPD_Malloc_error.
5847 *
5848 * The error is equal to the error incurred in k-1 multiplications. Assuming
5849 * the upper bound for the relative error in each operation:
5850 *
5851 * abs(err) = 5 * 10**-prec
5852 * result = x**k * (1 + err)**(k-1)
5853 */
5854static inline void
5855_mpd_qpow_uint(mpd_t *result, const mpd_t *base, mpd_uint_t exp,
5856 uint8_t resultsign, const mpd_context_t *ctx, uint32_t *status)
5857{
5858 uint32_t workstatus = 0;
5859 mpd_uint_t n;
5860
5861 if (exp == 0) {
5862 _settriple(result, resultsign, 1, 0); /* GCOV_NOT_REACHED */
5863 return; /* GCOV_NOT_REACHED */
5864 }
5865
5866 if (!mpd_qcopy(result, base, status)) {
5867 return;
5868 }
5869
5870 n = mpd_bits[mpd_bsr(exp)];
5871 while (n >>= 1) {
5872 mpd_qmul(result, result, result, ctx, &workstatus);
5873 if (exp & n) {
5874 mpd_qmul(result, result, base, ctx, &workstatus);
5875 }
5876 if (mpd_isspecial(result) ||
5877 (mpd_iszerocoeff(result) && (workstatus & MPD_Clamped))) {
5878 break;
5879 }
5880 }
5881
5882 *status |= workstatus;
5883 mpd_set_sign(result, resultsign);
5884}
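
/*
 * Informal illustration of the bound above: with abs(err) <= 5 * 10**-prec
 * per multiplication, k-1 multiplications accumulate a factor
 * (1 + err)**(k-1), i.e. to first order a relative error of about
 * (k-1) * 5 * 10**-prec. For prec = 28 and k = 10 that is roughly
 * 4.5 * 10**-27, which the callers absorb with extra guard digits before
 * the final rounding.
 */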
5885
5886/*
5887 * Internal function: Integer power with mpd_t exponent, tbase and texp
5888 * are modified!! Function can fail with MPD_Malloc_error.
5889 *
5890 * The error is equal to the error incurred in k multiplications. Assuming
5891 * the upper bound for the relative error in each operation:
5892 *
5893 * abs(err) = 5 * 10**-prec
5894 * result = x**k * (1 + err)**k
5895 */
5896static inline void
5897_mpd_qpow_mpd(mpd_t *result, mpd_t *tbase, mpd_t *texp, uint8_t resultsign,
5898 const mpd_context_t *ctx, uint32_t *status)
5899{
5900 uint32_t workstatus = 0;
5901 mpd_context_t maxctx;
5902 MPD_NEW_CONST(two,0,0,1,1,1,2);
5903
5904
5905 mpd_maxcontext(&maxctx);
5906
5907 /* resize to smaller cannot fail */
5908 mpd_qcopy(result, &one, status);
5909
5910 while (!mpd_iszero(texp)) {
5911 if (mpd_isodd(texp)) {
5912 mpd_qmul(result, result, tbase, ctx, &workstatus);
5913 *status |= workstatus;
5914 if (mpd_isspecial(result) ||
5915 (mpd_iszerocoeff(result) && (workstatus & MPD_Clamped))) {
5916 break;
5917 }
5918 }
5919 mpd_qmul(tbase, tbase, tbase, ctx, &workstatus);
5920 mpd_qdivint(texp, texp, &two, &maxctx, &workstatus);
5921 if (mpd_isnan(tbase) || mpd_isnan(texp)) {
5922 mpd_seterror(result, workstatus&MPD_Errors, status);
5923 return;
5924 }
5925 }
5926 mpd_set_sign(result, resultsign);
5927}
5928
5929/*
5930 * The power function for integer exponents. Relative error _before_ the
5931 * final rounding to prec:
5932 * abs(result - base**exp) < 0.1 * 10**-prec * abs(base**exp)
5933 */
5934static void
5935_mpd_qpow_int(mpd_t *result, const mpd_t *base, const mpd_t *exp,
5936 uint8_t resultsign,
5937 const mpd_context_t *ctx, uint32_t *status)
5938{
5939 mpd_context_t workctx;
5940 MPD_NEW_STATIC(tbase,0,0,0,0);
5941 MPD_NEW_STATIC(texp,0,0,0,0);
5942 mpd_ssize_t n;
5943
5944
5945 mpd_workcontext(&workctx, ctx);
5946 workctx.prec += (exp->digits + exp->exp + 2);
5947 workctx.round = MPD_ROUND_HALF_EVEN;
5948 workctx.clamp = 0;
5949 if (mpd_isnegative(exp)) {
5950 workctx.prec += 1;
5951 mpd_qdiv(&tbase, &one, base, &workctx, status);
5952 if (*status&MPD_Errors) {
5953 mpd_setspecial(result, MPD_POS, MPD_NAN);
5954 goto finish;
5955 }
5956 }
5957 else {
5958 if (!mpd_qcopy(&tbase, base, status)) {
5959 mpd_setspecial(result, MPD_POS, MPD_NAN);
5960 goto finish;
5961 }
5962 }
5963
5964 n = mpd_qabs_uint(exp, &workctx.status);
5965 if (workctx.status&MPD_Invalid_operation) {
5966 if (!mpd_qcopy(&texp, exp, status)) {
5967 mpd_setspecial(result, MPD_POS, MPD_NAN); /* GCOV_UNLIKELY */
5968 goto finish; /* GCOV_UNLIKELY */
5969 }
5970 _mpd_qpow_mpd(result, &tbase, &texp, resultsign, &workctx, status);
5971 }
5972 else {
5973 _mpd_qpow_uint(result, &tbase, n, resultsign, &workctx, status);
5974 }
5975
5976 if (mpd_isinfinite(result)) {
5977 /* for ROUND_DOWN, ROUND_FLOOR, etc. */
5978 _settriple(result, resultsign, 1, MPD_EXP_INF);
5979 }
5980
5981finish:
5982 mpd_del(&tbase);
5983 mpd_del(&texp);
5984 mpd_qfinalize(result, ctx, status);
5985}
5986
5987/*
5988 * If the exponent is infinite and base equals one, the result is one
5989 * with a coefficient of length prec. Otherwise, result is undefined.
5990 * Return the value of the comparison against one.
5991 */
5992static int
5993_qcheck_pow_one_inf(mpd_t *result, const mpd_t *base, uint8_t resultsign,
5994 const mpd_context_t *ctx, uint32_t *status)
5995{
5996 mpd_ssize_t shift;
5997 int cmp;
5998
5999 if ((cmp = _mpd_cmp(base, &one)) == 0) {
6000 shift = ctx->prec-1;
6001 mpd_qshiftl(result, &one, shift, status);
6002 result->exp = -shift;
6003 mpd_set_flags(result, resultsign);
6004 *status |= (MPD_Inexact|MPD_Rounded);
6005 }
6006
6007 return cmp;
6008}
6009
6010/*
6011 * If abs(base) equals one, calculate the correct power-of-one result.
6012 * Otherwise, result is undefined. Return the value of the comparison
6013 * against 1.
6014 *
6015 * This is an internal function that does not check for specials.
6016 */
6017static int
6018_qcheck_pow_one(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6019 uint8_t resultsign,
6020 const mpd_context_t *ctx, uint32_t *status)
6021{
6022 uint32_t workstatus = 0;
6023 mpd_ssize_t shift;
6024 int cmp;
6025
6026 if ((cmp = _mpd_cmp_abs(base, &one)) == 0) {
6027 if (_mpd_isint(exp)) {
6028 if (mpd_isnegative(exp)) {
6029 _settriple(result, resultsign, 1, 0);
6030 return 0;
6031 }
6032 /* 1.000**3 = 1.000000000 */
6033 mpd_qmul_ssize(result, exp, -base->exp, ctx, &workstatus);
6034 if (workstatus&MPD_Errors) {
6035 *status |= (workstatus&MPD_Errors);
6036 return 0;
6037 }
6038 /* digits-1 after exponentiation */
6039 shift = mpd_qget_ssize(result, &workstatus);
6040 /* shift is MPD_SSIZE_MAX if result is too large */
6041 if (shift > ctx->prec-1) {
6042 shift = ctx->prec-1;
6043 *status |= MPD_Rounded;
6044 }
6045 }
6046 else if (mpd_ispositive(base)) {
6047 shift = ctx->prec-1;
6048 *status |= (MPD_Inexact|MPD_Rounded);
6049 }
6050 else {
6051 return -2; /* GCOV_NOT_REACHED */
6052 }
6053 if (!mpd_qshiftl(result, &one, shift, status)) {
6054 return 0;
6055 }
6056 result->exp = -shift;
6057 mpd_set_flags(result, resultsign);
6058 }
6059
6060 return cmp;
6061}
6062
6063/*
6064 * Detect certain over/underflow of x**y.
6065 * ACL2 proof: pow-bounds.lisp.
6066 *
6067 * Symbols:
6068 *
6069 * e: EXP_INF or EXP_CLAMP
6070 * x: base
6071 * y: exponent
6072 *
6073 * omega(e) = log10(abs(e))
6074 * zeta(x) = log10(abs(log10(x)))
6075 * theta(y) = log10(abs(y))
6076 *
6077 * Upper and lower bounds:
6078 *
6079 * ub_omega(e) = ceil(log10(abs(e)))
6080 * lb_theta(y) = floor(log10(abs(y)))
6081 *
6082 * | floor(log10(floor(abs(log10(x))))) if x < 1/10 or x >= 10
6083 * lb_zeta(x) = | floor(log10(abs(x-1)/10)) if 1/10 <= x < 1
6084 * | floor(log10(abs((x-1)/100))) if 1 < x < 10
6085 *
6086 * ub_omega(e) and lb_theta(y) are obviously upper and lower bounds
6087 * for omega(e) and theta(y).
6088 *
6089 * lb_zeta is a lower bound for zeta(x):
6090 *
6091 * x < 1/10 or x >= 10:
6092 *
6093 * abs(log10(x)) >= 1, so the outer log10 is well defined. Since log10
6094 * is strictly increasing, the end result is a lower bound.
6095 *
6096 * 1/10 <= x < 1:
6097 *
6098 * We use: log10(x) <= (x-1)/log(10)
6099 * abs(log10(x)) >= abs(x-1)/log(10)
6100 * abs(log10(x)) >= abs(x-1)/10
6101 *
6102 * 1 < x < 10:
6103 *
6104 * We use: (x-1)/(x*log(10)) < log10(x)
6105 * abs((x-1)/100) < abs(log10(x))
6106 *
6107 * XXX: abs((x-1)/10) would work, need ACL2 proof.
6108 *
6109 *
6110 * Let (0 < x < 1 and y < 0) or (x > 1 and y > 0). (H1)
6111 * Let ub_omega(exp_inf) < lb_zeta(x) + lb_theta(y) (H2)
6112 *
6113 * Then:
6114 * log10(abs(exp_inf)) < log10(abs(log10(x))) + log10(abs(y)). (1)
6115 * exp_inf < log10(x) * y (2)
6116 * 10**exp_inf < x**y (3)
6117 *
6118 * Let (0 < x < 1 and y > 0) or (x > 1 and y < 0). (H3)
6119 * Let ub_omega(exp_clamp) < lb_zeta(x) + lb_theta(y) (H4)
6120 *
6121 * Then:
6122 * log10(abs(exp_clamp)) < log10(abs(log10(x))) + log10(abs(y)). (4)
6123 * log10(x) * y < exp_clamp (5)
6124 * x**y < 10**exp_clamp (6)
6125 *
6126 */
6127static mpd_ssize_t
6128_lower_bound_zeta(const mpd_t *x, uint32_t *status)
6129{
6130 mpd_context_t maxctx;
6131 MPD_NEW_STATIC(scratch,0,0,0,0);
6132 mpd_ssize_t t, u;
6133
6134 t = mpd_adjexp(x);
6135 if (t > 0) {
6136 /* x >= 10 -> floor(log10(floor(abs(log10(x))))) */
6137 return mpd_exp_digits(t) - 1;
6138 }
6139 else if (t < -1) {
6140 /* x < 1/10 -> floor(log10(floor(abs(log10(x))))) */
6141 return mpd_exp_digits(t+1) - 1;
6142 }
6143 else {
6144 mpd_maxcontext(&maxctx);
6145 mpd_qsub(&scratch, x, &one, &maxctx, status);
6146 if (mpd_isspecial(&scratch)) {
6147 mpd_del(&scratch);
6148 return MPD_SSIZE_MAX;
6149 }
6150 u = mpd_adjexp(&scratch);
6151 mpd_del(&scratch);
6152
6153 /* t == -1, 1/10 <= x < 1 -> floor(log10(abs(x-1)/10))
6154 * t == 0, 1 < x < 10 -> floor(log10(abs(x-1)/100)) */
6155 return (t == 0) ? u-2 : u-1;
6156 }
6157}
6158
6159/*
6160 * Detect cases of certain overflow/underflow in the power function.
6161 * Assumptions: x != 1, y != 0. The proof above is for positive x.
6162 * If x is negative and y is an odd integer, x**y == -(abs(x)**y),
6163 * so the analysis does not change.
6164 */
6165static int
6166_qcheck_pow_bounds(mpd_t *result, const mpd_t *x, const mpd_t *y,
6167 uint8_t resultsign,
6168 const mpd_context_t *ctx, uint32_t *status)
6169{
6170 MPD_NEW_SHARED(abs_x, x);
6171 mpd_ssize_t ub_omega, lb_zeta, lb_theta;
6172 uint8_t sign;
6173
6174 mpd_set_positive(&abs_x);
6175
6176 lb_theta = mpd_adjexp(y);
6177 lb_zeta = _lower_bound_zeta(&abs_x, status);
6178 if (lb_zeta == MPD_SSIZE_MAX) {
6179 mpd_seterror(result, MPD_Malloc_error, status);
6180 return 1;
6181 }
6182
6183 sign = (mpd_adjexp(&abs_x) < 0) ^ mpd_sign(y);
6184 if (sign == 0) {
6185 /* (0 < |x| < 1 and y < 0) or (|x| > 1 and y > 0) */
6186 ub_omega = mpd_exp_digits(ctx->emax);
6187 if (ub_omega < lb_zeta + lb_theta) {
6188 _settriple(result, resultsign, 1, MPD_EXP_INF);
6189 mpd_qfinalize(result, ctx, status);
6190 return 1;
6191 }
6192 }
6193 else {
6194 /* (0 < |x| < 1 and y > 0) or (|x| > 1 and y < 0). */
6195 ub_omega = mpd_exp_digits(mpd_etiny(ctx));
6196 if (ub_omega < lb_zeta + lb_theta) {
6197 _settriple(result, resultsign, 1, mpd_etiny(ctx)-1);
6198 mpd_qfinalize(result, ctx, status);
6199 return 1;
6200 }
6201 }
6202
6203 return 0;
6204}
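
/*
 * Worked example (informal, using the symbols from the comment above):
 * for x = 2, y = 1E+100 and emax = 999999999:
 *
 *   lb_zeta(2)        = floor(log10(abs(2-1)/100)) = -2   (case 1 < x < 10)
 *   lb_theta(1E+100)  = 100
 *   ub_omega(emax)    = ceil(log10(999999999)) = 9
 *
 * Since 9 < -2 + 100, hypothesis (H2) holds: 2**1E+100 certainly overflows
 * and the overflow triple can be returned without computing the power.
 */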
6205
6206/*
6207 * TODO: Implement algorithm for computing exact powers from decimal.py.
6208 * In order to prevent infinite loops, this has to be called before
6209 * using Ziv's strategy for correct rounding.
6210 */
6211/*
6212static int
6213_mpd_qpow_exact(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6214 const mpd_context_t *ctx, uint32_t *status)
6215{
6216 return 0;
6217}
6218*/
6219
6220/*
6221 * The power function for real exponents.
6222 * Relative error: abs(result - e**y) < e**y * 1/5 * 10**(-prec - 1)
6223 */
6224static void
6225_mpd_qpow_real(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6226 const mpd_context_t *ctx, uint32_t *status)
6227{
6228 mpd_context_t workctx;
6229 MPD_NEW_STATIC(texp,0,0,0,0);
6230
6231 if (!mpd_qcopy(&texp, exp, status)) {
6232 mpd_seterror(result, MPD_Malloc_error, status);
6233 return;
6234 }
6235
6236 mpd_maxcontext(&workctx);
6237 workctx.prec = (base->digits > ctx->prec) ? base->digits : ctx->prec;
6238 workctx.prec += (4 + MPD_EXPDIGITS);
6239 workctx.round = MPD_ROUND_HALF_EVEN;
6240 workctx.allcr = ctx->allcr;
6241
6242 /*
6243 * extra := MPD_EXPDIGITS = MPD_EXP_MAX_T
6244 * wp := prec + 4 + extra
6245 * abs(err) < 5 * 10**-wp
6246 * y := log(base) * exp
6247 * Calculate:
6248 * 1) e**(y * (1 + err)**2) * (1 + err)
6249 * = e**y * e**(y * (2*err + err**2)) * (1 + err)
6250 * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
6251 * Relative error of the underlined term:
6252 * 2) abs(e**(y * (2*err + err**2)) - 1)
6253 * Case abs(y) >= 10**extra:
6254 * 3) adjexp(y)+1 > log10(abs(y)) >= extra
6255 * This triggers the Overflow/Underflow shortcut in _mpd_qexp(),
6256 * so no further analysis is necessary.
6257 * Case abs(y) < 10**extra:
6258 * 4) abs(y * (2*err + err**2)) < 1/5 * 10**(-prec - 2)
6259 * Use (see _mpd_qexp):
6260 * 5) abs(x) <= 9/10 * 10**-p ==> abs(e**x - 1) < 10**-p
6261 * With 2), 4) and 5):
6262 * 6) abs(e**(y * (2*err + err**2)) - 1) < 10**(-prec - 2)
6263 * The complete relative error of 1) is:
6264 * 7) abs(result - e**y) < e**y * 1/5 * 10**(-prec - 1)
6265 */
6266 mpd_qln(result, base, &workctx, &workctx.status);
6267 mpd_qmul(result, result, &texp, &workctx, &workctx.status);
6268 mpd_qexp(result, result, &workctx, status);
6269
6270 mpd_del(&texp);
6271 *status |= (workctx.status&MPD_Errors);
6272 *status |= (MPD_Inexact|MPD_Rounded);
6273}
6274
6275/* The power function: base**exp */
6276void
6277mpd_qpow(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6278 const mpd_context_t *ctx, uint32_t *status)
6279{
6280 uint8_t resultsign = 0;
6281 int intexp = 0;
6282 int cmp;
6283
6284 if (mpd_isspecial(base) || mpd_isspecial(exp)) {
6285 if (mpd_qcheck_nans(result, base, exp, ctx, status)) {
6286 return;
6287 }
6288 }
6289 if (mpd_isinteger(exp)) {
6290 intexp = 1;
6291 resultsign = mpd_isnegative(base) && mpd_isodd(exp);
6292 }
6293
6294 if (mpd_iszero(base)) {
6295 if (mpd_iszero(exp)) {
6296 mpd_seterror(result, MPD_Invalid_operation, status);
6297 }
6298 else if (mpd_isnegative(exp)) {
6299 mpd_setspecial(result, resultsign, MPD_INF);
6300 }
6301 else {
6302 _settriple(result, resultsign, 0, 0);
6303 }
6304 return;
6305 }
6306 if (mpd_isnegative(base)) {
6307 if (!intexp || mpd_isinfinite(exp)) {
6308 mpd_seterror(result, MPD_Invalid_operation, status);
6309 return;
6310 }
6311 }
6312 if (mpd_isinfinite(exp)) {
6313 /* power of one */
6314 cmp = _qcheck_pow_one_inf(result, base, resultsign, ctx, status);
6315 if (cmp == 0) {
6316 return;
6317 }
6318 else {
6319 cmp *= mpd_arith_sign(exp);
6320 if (cmp < 0) {
6321 _settriple(result, resultsign, 0, 0);
6322 }
6323 else {
6324 mpd_setspecial(result, resultsign, MPD_INF);
6325 }
6326 }
6327 return;
6328 }
6329 if (mpd_isinfinite(base)) {
6330 if (mpd_iszero(exp)) {
6331 _settriple(result, resultsign, 1, 0);
6332 }
6333 else if (mpd_isnegative(exp)) {
6334 _settriple(result, resultsign, 0, 0);
6335 }
6336 else {
6337 mpd_setspecial(result, resultsign, MPD_INF);
6338 }
6339 return;
6340 }
6341 if (mpd_iszero(exp)) {
6342 _settriple(result, resultsign, 1, 0);
6343 return;
6344 }
6345 if (_qcheck_pow_one(result, base, exp, resultsign, ctx, status) == 0) {
6346 return;
6347 }
6348 if (_qcheck_pow_bounds(result, base, exp, resultsign, ctx, status)) {
6349 return;
6350 }
6351
6352 if (intexp) {
6353 _mpd_qpow_int(result, base, exp, resultsign, ctx, status);
6354 }
6355 else {
6356 _mpd_qpow_real(result, base, exp, ctx, status);
6357 if (!mpd_isspecial(result) && _mpd_cmp(result, &one) == 0) {
6358 mpd_ssize_t shift = ctx->prec-1;
6359 mpd_qshiftl(result, &one, shift, status);
6360 result->exp = -shift;
6361 }
6362 if (mpd_isinfinite(result)) {
6363 /* for ROUND_DOWN, ROUND_FLOOR, etc. */
6364 _settriple(result, MPD_POS, 1, MPD_EXP_INF);
6365 }
6366 mpd_qfinalize(result, ctx, status);
6367 }
6368}
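
/*
 * Example (not part of the implementation): calling the power function
 * through the public API declared in mpdecimal.h. An integer exponent is
 * dispatched to _mpd_qpow_int(), a fractional one to _mpd_qpow_real()
 * via exp(exponent * ln(base)).
 *
 *     mpd_context_t ctx;
 *     uint32_t status = 0;
 *     mpd_t *base = mpd_qnew(), *exponent = mpd_qnew(), *r = mpd_qnew();
 *
 *     mpd_defaultcontext(&ctx);
 *     ctx.prec = 28;
 *     mpd_qset_string(base, "2", &ctx, &status);
 *     mpd_qset_string(exponent, "10", &ctx, &status);
 *     mpd_qpow(r, base, exponent, &ctx, &status);   // 1024
 *
 *     mpd_qset_string(exponent, "0.5", &ctx, &status);
 *     mpd_qpow(r, base, exponent, &ctx, &status);   // sqrt(2) to 28 digits
 *
 *     mpd_del(base); mpd_del(exponent); mpd_del(r);
 */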
6369
6370/*
6371 * Internal function: Integer powmod with mpd_uint_t exponent, base is modified!
6372 * Function can fail with MPD_Malloc_error.
6373 */
6374static inline void
6375_mpd_qpowmod_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
6376 mpd_t *mod, uint32_t *status)
6377{
6378 mpd_context_t maxcontext;
6379
6380 mpd_maxcontext(&maxcontext);
6381
6382 /* resize to smaller cannot fail */
6383 mpd_qcopy(result, &one, status);
6384
6385 while (exp > 0) {
6386 if (exp & 1) {
6387 mpd_qmul(result, result, base, &maxcontext, status);
6388 mpd_qrem(result, result, mod, &maxcontext, status);
6389 }
6390 mpd_qmul(base, base, base, &maxcontext, status);
6391 mpd_qrem(base, base, mod, &maxcontext, status);
6392 exp >>= 1;
6393 }
6394}
6395
6396/* The powmod function: (base**exp) % mod */
6397void
6398mpd_qpowmod(mpd_t *result, const mpd_t *base, const mpd_t *exp,
6399 const mpd_t *mod,
6400 const mpd_context_t *ctx, uint32_t *status)
6401{
6402 mpd_context_t maxcontext;
6403 MPD_NEW_STATIC(tbase,0,0,0,0);
6404 MPD_NEW_STATIC(texp,0,0,0,0);
6405 MPD_NEW_STATIC(tmod,0,0,0,0);
6406 MPD_NEW_STATIC(tmp,0,0,0,0);
6407 MPD_NEW_CONST(two,0,0,1,1,1,2);
6408 mpd_ssize_t tbase_exp, texp_exp;
6409 mpd_ssize_t i;
6410 mpd_t t;
6411 mpd_uint_t r;
6412 uint8_t sign;
6413
6414
6415 if (mpd_isspecial(base) || mpd_isspecial(exp) || mpd_isspecial(mod)) {
6416 if (mpd_qcheck_3nans(result, base, exp, mod, ctx, status)) {
6417 return;
6418 }
6419 mpd_seterror(result, MPD_Invalid_operation, status);
6420 return;
6421 }
6422
6423
6424 if (!_mpd_isint(base) || !_mpd_isint(exp) || !_mpd_isint(mod)) {
6425 mpd_seterror(result, MPD_Invalid_operation, status);
6426 return;
6427 }
6428 if (mpd_iszerocoeff(mod)) {
6429 mpd_seterror(result, MPD_Invalid_operation, status);
6430 return;
6431 }
6432 if (mod->digits+mod->exp > ctx->prec) {
6433 mpd_seterror(result, MPD_Invalid_operation, status);
6434 return;
6435 }
6436
6437 sign = (mpd_isnegative(base)) && (mpd_isodd(exp));
6438 if (mpd_iszerocoeff(exp)) {
6439 if (mpd_iszerocoeff(base)) {
6440 mpd_seterror(result, MPD_Invalid_operation, status);
6441 return;
6442 }
6443 r = (_mpd_cmp_abs(mod, &one)==0) ? 0 : 1;
6444 _settriple(result, sign, r, 0);
6445 return;
6446 }
6447 if (mpd_isnegative(exp)) {
6448 mpd_seterror(result, MPD_Invalid_operation, status);
6449 return;
6450 }
6451 if (mpd_iszerocoeff(base)) {
6452 _settriple(result, sign, 0, 0);
6453 return;
6454 }
6455
6456 if (!mpd_qcopy(&tmod, mod, status)) {
6457 goto mpd_errors;
6458 }
6459 mpd_set_positive(&tmod);
6460
6461 mpd_maxcontext(&maxcontext);
6462
6463 mpd_qround_to_int(&tbase, base, &maxcontext, status);
6464 mpd_qround_to_int(&texp, exp, &maxcontext, status);
6465 mpd_qround_to_int(&tmod, &tmod, &maxcontext, status);
6466
6467 tbase_exp = tbase.exp;
6468 tbase.exp = 0;
6469 texp_exp = texp.exp;
6470 texp.exp = 0;
6471
6472 /* base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo */
6473 mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
6474 _settriple(result, MPD_POS, 1, tbase_exp);
6475 mpd_qrem(result, result, &tmod, &maxcontext, status);
6476 mpd_qmul(&tbase, &tbase, result, &maxcontext, status);
6477 mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
6478 if (mpd_isspecial(&tbase) ||
6479 mpd_isspecial(&texp) ||
6480 mpd_isspecial(&tmod)) {
6481 goto mpd_errors;
6482 }
6483
6484 for (i = 0; i < texp_exp; i++) {
6485 _mpd_qpowmod_uint(&tmp, &tbase, 10, &tmod, status);
6486 t = tmp;
6487 tmp = tbase;
6488 tbase = t;
6489 }
6490 if (mpd_isspecial(&tbase)) {
6491 goto mpd_errors; /* GCOV_UNLIKELY */
6492 }
6493
6494 /* resize to smaller cannot fail */
6495 mpd_qcopy(result, &one, status);
6496 while (mpd_isfinite(&texp) && !mpd_iszero(&texp)) {
6497 if (mpd_isodd(&texp)) {
6498 mpd_qmul(result, result, &tbase, &maxcontext, status);
6499 mpd_qrem(result, result, &tmod, &maxcontext, status);
6500 }
6501 mpd_qmul(&tbase, &tbase, &tbase, &maxcontext, status);
6502 mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
6503 mpd_qdivint(&texp, &texp, &two, &maxcontext, status);
6504 }
6505 if (mpd_isspecial(&texp) || mpd_isspecial(&tbase) ||
6506 mpd_isspecial(&tmod) || mpd_isspecial(result)) {
6507 /* MPD_Malloc_error */
6508 goto mpd_errors;
6509 }
6510 else {
6511 mpd_set_sign(result, sign);
6512 }
6513
6514out:
6515 mpd_del(&tbase);
6516 mpd_del(&texp);
6517 mpd_del(&tmod);
6518 mpd_del(&tmp);
6519 mpd_qfinalize(result, ctx, status);
6520 return;
6521
6522mpd_errors:
6523 mpd_setspecial(result, MPD_POS, MPD_NAN);
6524 goto out;
6525}
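
/*
 * Example (not part of the implementation): modular exponentiation with
 * integer operands, e.g. 3**200 mod 17 == 16.
 *
 *     mpd_context_t ctx;
 *     uint32_t status = 0;
 *     mpd_t *b = mpd_qnew(), *e = mpd_qnew(), *m = mpd_qnew(), *r = mpd_qnew();
 *
 *     mpd_defaultcontext(&ctx);
 *     mpd_qset_string(b, "3", &ctx, &status);
 *     mpd_qset_string(e, "200", &ctx, &status);
 *     mpd_qset_string(m, "17", &ctx, &status);
 *     mpd_qpowmod(r, b, e, m, &ctx, &status);   // 16
 *
 *     mpd_del(b); mpd_del(e); mpd_del(m); mpd_del(r);
 */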
6526
6527void
6528mpd_qquantize(mpd_t *result, const mpd_t *a, const mpd_t *b,
6529 const mpd_context_t *ctx, uint32_t *status)
6530{
6531 uint32_t workstatus = 0;
6532 mpd_ssize_t b_exp = b->exp;
6533 mpd_ssize_t expdiff, shift;
6534 mpd_uint_t rnd;
6535
6536 if (mpd_isspecial(a) || mpd_isspecial(b)) {
6537 if (mpd_qcheck_nans(result, a, b, ctx, status)) {
6538 return;
6539 }
6540 if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
6541 mpd_qcopy(result, a, status);
6542 return;
6543 }
6544 mpd_seterror(result, MPD_Invalid_operation, status);
6545 return;
6546 }
6547
6548 if (b->exp > ctx->emax || b->exp < mpd_etiny(ctx)) {
6549 mpd_seterror(result, MPD_Invalid_operation, status);
6550 return;
6551 }
6552
6553 if (mpd_iszero(a)) {
6554 _settriple(result, mpd_sign(a), 0, b->exp);
6555 mpd_qfinalize(result, ctx, status);
6556 return;
6557 }
6558
6559
6560 expdiff = a->exp - b->exp;
6561 if (a->digits + expdiff > ctx->prec) {
6562 mpd_seterror(result, MPD_Invalid_operation, status);
6563 return;
6564 }
6565
6566 if (expdiff >= 0) {
6567 shift = expdiff;
6568 if (!mpd_qshiftl(result, a, shift, status)) {
6569 return;
6570 }
6571 result->exp = b_exp;
6572 }
6573 else {
6574 /* At this point expdiff < 0 and a->digits+expdiff <= prec,
6575 * so the shift before an increment will fit in prec. */
6576 shift = -expdiff;
6577 rnd = mpd_qshiftr(result, a, shift, status);
6578 if (rnd == MPD_UINT_MAX) {
6579 return;
6580 }
6581 result->exp = b_exp;
6582 if (!_mpd_apply_round_fit(result, rnd, ctx, status)) {
6583 return;
6584 }
6585 workstatus |= MPD_Rounded;
6586 if (rnd) {
6587 workstatus |= MPD_Inexact;
6588 }
6589 }
6590
6591 if (mpd_adjexp(result) > ctx->emax ||
6592 mpd_adjexp(result) < mpd_etiny(ctx)) {
6593 mpd_seterror(result, MPD_Invalid_operation, status);
6594 return;
6595 }
6596
6597 *status |= workstatus;
6598 mpd_qfinalize(result, ctx, status);
6599}
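
/*
 * Examples (informal, taken from the specification): quantize adjusts the
 * first operand to the exponent of the second, rounding the coefficient
 * if digits have to be dropped:
 *
 *   quantize(2.17, 0.001)  ->  2.170
 *   quantize(2.17, 0.01)   ->  2.17
 *   quantize(2.17, 0.1)    ->  2.2
 *   quantize(2.17, 1E+1)   ->  0E+1
 *
 * If the result would need more than prec digits, the operation fails with
 * MPD_Invalid_operation, as checked above.
 */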
6600
6601void
6602mpd_qreduce(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6603 uint32_t *status)
6604{
6605 mpd_ssize_t shift, maxexp, maxshift;
6606 uint8_t sign_a = mpd_sign(a);
6607
6608 if (mpd_isspecial(a)) {
6609 if (mpd_qcheck_nan(result, a, ctx, status)) {
6610 return;
6611 }
6612 mpd_qcopy(result, a, status);
6613 return;
6614 }
6615
6616 if (!mpd_qcopy(result, a, status)) {
6617 return;
6618 }
6619 mpd_qfinalize(result, ctx, status);
6620 if (mpd_isspecial(result)) {
6621 return;
6622 }
6623 if (mpd_iszero(result)) {
6624 _settriple(result, sign_a, 0, 0);
6625 return;
6626 }
6627
6628 shift = mpd_trail_zeros(result);
6629 maxexp = (ctx->clamp) ? mpd_etop(ctx) : ctx->emax;
6630 /* After the finalizing above result->exp <= maxexp. */
6631 maxshift = maxexp - result->exp;
6632 shift = (shift > maxshift) ? maxshift : shift;
6633
6634 mpd_qshiftr_inplace(result, shift);
6635 result->exp += shift;
6636}
6637
6638void
6639mpd_qrem(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
6640 uint32_t *status)
6641{
6642 MPD_NEW_STATIC(q,0,0,0,0);
6643
6644 if (mpd_isspecial(a) || mpd_isspecial(b)) {
6645 if (mpd_qcheck_nans(r, a, b, ctx, status)) {
6646 return;
6647 }
6648 if (mpd_isinfinite(a)) {
6649 mpd_seterror(r, MPD_Invalid_operation, status);
6650 return;
6651 }
6652 if (mpd_isinfinite(b)) {
6653 mpd_qcopy(r, a, status);
6654 mpd_qfinalize(r, ctx, status);
6655 return;
6656 }
6657 /* debug */
6658 abort(); /* GCOV_NOT_REACHED */
6659 }
6660 if (mpd_iszerocoeff(b)) {
6661 if (mpd_iszerocoeff(a)) {
6662 mpd_seterror(r, MPD_Division_undefined, status);
6663 }
6664 else {
6665 mpd_seterror(r, MPD_Invalid_operation, status);
6666 }
6667 return;
6668 }
6669
6670 _mpd_qdivmod(&q, r, a, b, ctx, status);
6671 mpd_del(&q);
6672 mpd_qfinalize(r, ctx, status);
6673}
6674
6675void
6676mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
6677 const mpd_context_t *ctx, uint32_t *status)
6678{
6679 mpd_context_t workctx;
6680 MPD_NEW_STATIC(btmp,0,0,0,0);
6681 MPD_NEW_STATIC(q,0,0,0,0);
6682 mpd_ssize_t expdiff, qdigits;
6683 int cmp, isodd, allnine;
6684
6685 if (mpd_isspecial(a) || mpd_isspecial(b)) {
6686 if (mpd_qcheck_nans(r, a, b, ctx, status)) {
6687 return;
6688 }
6689 if (mpd_isinfinite(a)) {
6690 mpd_seterror(r, MPD_Invalid_operation, status);
6691 return;
6692 }
6693 if (mpd_isinfinite(b)) {
6694 mpd_qcopy(r, a, status);
6695 mpd_qfinalize(r, ctx, status);
6696 return;
6697 }
6698 /* debug */
6699 abort(); /* GCOV_NOT_REACHED */
6700 }
6701 if (mpd_iszerocoeff(b)) {
6702 if (mpd_iszerocoeff(a)) {
6703 mpd_seterror(r, MPD_Division_undefined, status);
6704 }
6705 else {
6706 mpd_seterror(r, MPD_Invalid_operation, status);
6707 }
6708 return;
6709 }
6710
6711 if (r == b) {
6712 if (!mpd_qcopy(&btmp, b, status)) {
6713 mpd_seterror(r, MPD_Malloc_error, status);
6714 return;
6715 }
6716 b = &btmp;
6717 }
6718
6719 _mpd_qdivmod(&q, r, a, b, ctx, status);
6720 if (mpd_isnan(&q) || mpd_isnan(r)) {
6721 goto finish;
6722 }
6723 if (mpd_iszerocoeff(r)) {
6724 goto finish;
6725 }
6726
6727 expdiff = mpd_adjexp(b) - mpd_adjexp(r);
6728 if (-1 <= expdiff && expdiff <= 1) {
6729
6730 allnine = mpd_coeff_isallnine(&q);
6731 qdigits = q.digits;
6732 isodd = mpd_isodd(&q);
6733
6734 mpd_maxcontext(&workctx);
6735 if (mpd_sign(a) == mpd_sign(b)) {
6736 /* sign(r) == sign(b) */
6737 _mpd_qsub(&q, r, b, &workctx, &workctx.status);
6738 }
6739 else {
6740 /* sign(r) != sign(b) */
6741 _mpd_qadd(&q, r, b, &workctx, &workctx.status);
6742 }
6743
6744 if (workctx.status&MPD_Errors) {
6745 mpd_seterror(r, workctx.status&MPD_Errors, status);
6746 goto finish;
6747 }
6748
6749 cmp = _mpd_cmp_abs(&q, r);
6750 if (cmp < 0 || (cmp == 0 && isodd)) {
6751 /* abs(r) > abs(b)/2 or abs(r) == abs(b)/2 and isodd(quotient) */
6752 if (allnine && qdigits == ctx->prec) {
6753 /* abs(quotient) + 1 == 10**prec */
6754 mpd_seterror(r, MPD_Division_impossible, status);
6755 goto finish;
6756 }
6757 mpd_qcopy(r, &q, status);
Stefan Krah1919b7e2012-03-21 18:25:23 +01006758 }
6759 }
6760
6761
6762finish:
6763 mpd_del(&btmp);
6764 mpd_del(&q);
6765 mpd_qfinalize(r, ctx, status);
6766}
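
/*
 * Examples (informal, taken from the specification): remainder-near
 * returns a - b*n, where n is the integer nearest to the exact value of
 * a/b (ties go to the even quotient):
 *
 *   remainder-near(2.1, 3)  ->  -0.9
 *   remainder-near(10, 6)   ->  -2
 *   remainder-near(10, 3)   ->   1
 */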
6767
6768static void
6769_mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
6770 const mpd_context_t *ctx, uint32_t *status)
6771{
6772 mpd_ssize_t expdiff, shift;
6773 mpd_uint_t rnd;
6774
6775 if (mpd_isspecial(a)) {
6776 mpd_qcopy(result, a, status);
6777 return;
6778 }
6779
6780 if (mpd_iszero(a)) {
6781 _settriple(result, mpd_sign(a), 0, exp);
6782 return;
6783 }
6784
6785 expdiff = a->exp - exp;
6786 if (expdiff >= 0) {
6787 shift = expdiff;
6788 if (a->digits + shift > MPD_MAX_PREC+1) {
6789 mpd_seterror(result, MPD_Invalid_operation, status);
6790 return;
6791 }
6792 if (!mpd_qshiftl(result, a, shift, status)) {
6793 return;
6794 }
6795 result->exp = exp;
6796 }
6797 else {
6798 shift = -expdiff;
6799 rnd = mpd_qshiftr(result, a, shift, status);
6800 if (rnd == MPD_UINT_MAX) {
6801 return;
6802 }
6803 result->exp = exp;
6804 _mpd_apply_round_excess(result, rnd, ctx, status);
6805 *status |= MPD_Rounded;
6806 if (rnd) {
6807 *status |= MPD_Inexact;
6808 }
6809 }
6810
6811 if (mpd_issubnormal(result, ctx)) {
6812 *status |= MPD_Subnormal;
6813 }
6814}
6815
6816/*
6817 * Rescale a number so that it has exponent 'exp'. Does not regard context
6818 * precision, emax, emin, but uses the rounding mode. Special numbers are
6819 * quietly copied. Restrictions:
6820 *
6821 * MPD_MIN_ETINY <= exp <= MPD_MAX_EMAX+1
6822 * result->digits <= MPD_MAX_PREC+1
6823 */
6824void
6825mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
6826 const mpd_context_t *ctx, uint32_t *status)
6827{
6828 if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY) {
6829 mpd_seterror(result, MPD_Invalid_operation, status);
6830 return;
6831 }
6832
6833 _mpd_qrescale(result, a, exp, ctx, status);
6834}
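
/*
 * Example (informal): under MPD_ROUND_HALF_EVEN, rescaling 2.17 to
 * exponent -1 gives 2.2, and rescaling it to exponent 1 gives 0E+1.
 * Unlike quantize, only the exponent limits above are enforced; the
 * context precision is not consulted.
 */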
6835
6836/*
6837 * Same as mpd_qrescale, but with relaxed restrictions. The result of this
6838 * function should only be used for formatting a number and never as input
6839 * for other operations.
6840 *
6841 * MPD_MIN_ETINY-MPD_MAX_PREC <= exp <= MPD_MAX_EMAX+1
6842 * result->digits <= MPD_MAX_PREC+1
6843 */
6844void
6845mpd_qrescale_fmt(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
6846 const mpd_context_t *ctx, uint32_t *status)
6847{
6848 if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY-MPD_MAX_PREC) {
6849 mpd_seterror(result, MPD_Invalid_operation, status);
6850 return;
6851 }
6852
6853 _mpd_qrescale(result, a, exp, ctx, status);
6854}
6855
6856/* Round to an integer according to 'action' and ctx->round. */
6857enum {TO_INT_EXACT, TO_INT_SILENT, TO_INT_TRUNC};
6858static void
6859_mpd_qround_to_integral(int action, mpd_t *result, const mpd_t *a,
6860 const mpd_context_t *ctx, uint32_t *status)
6861{
6862 mpd_uint_t rnd;
6863
6864 if (mpd_isspecial(a)) {
6865 if (mpd_qcheck_nan(result, a, ctx, status)) {
6866 return;
6867 }
6868 mpd_qcopy(result, a, status);
6869 return;
6870 }
6871 if (a->exp >= 0) {
6872 mpd_qcopy(result, a, status);
6873 return;
6874 }
6875 if (mpd_iszerocoeff(a)) {
6876 _settriple(result, mpd_sign(a), 0, 0);
6877 return;
6878 }
6879
6880 rnd = mpd_qshiftr(result, a, -a->exp, status);
6881 if (rnd == MPD_UINT_MAX) {
6882 return;
6883 }
6884 result->exp = 0;
6885
6886 if (action == TO_INT_EXACT || action == TO_INT_SILENT) {
6887 _mpd_apply_round_excess(result, rnd, ctx, status);
6888 if (action == TO_INT_EXACT) {
6889 *status |= MPD_Rounded;
6890 if (rnd) {
6891 *status |= MPD_Inexact;
6892 }
6893 }
6894 }
6895}
6896
6897void
6898mpd_qround_to_intx(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6899 uint32_t *status)
6900{
6901 (void)_mpd_qround_to_integral(TO_INT_EXACT, result, a, ctx, status);
6902}
6903
6904void
6905mpd_qround_to_int(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6906 uint32_t *status)
6907{
6908 (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a, ctx, status);
6909}
6910
6911void
6912mpd_qtrunc(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6913 uint32_t *status)
6914{
6915 (void)_mpd_qround_to_integral(TO_INT_TRUNC, result, a, ctx, status);
6916}
6917
6918void
6919mpd_qfloor(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6920 uint32_t *status)
6921{
6922 mpd_context_t workctx = *ctx;
6923 workctx.round = MPD_ROUND_FLOOR;
6924 (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
6925 &workctx, status);
6926}
6927
6928void
6929mpd_qceil(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
6930 uint32_t *status)
6931{
6932 mpd_context_t workctx = *ctx;
6933 workctx.round = MPD_ROUND_CEILING;
6934 (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
6935 &workctx, status);
6936}
6937
6938int
6939mpd_same_quantum(const mpd_t *a, const mpd_t *b)
6940{
6941 if (mpd_isspecial(a) || mpd_isspecial(b)) {
6942 return ((mpd_isnan(a) && mpd_isnan(b)) ||
6943 (mpd_isinfinite(a) && mpd_isinfinite(b)));
6944 }
6945
6946 return a->exp == b->exp;
6947}
6948
6949/* Schedule the increase in precision for the Newton iteration. */
6950static inline int
6951recpr_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
6952 mpd_ssize_t maxprec, mpd_ssize_t initprec)
6953{
6954 mpd_ssize_t k;
6955 int i;
6956
6957 assert(maxprec > 0 && initprec > 0);
6958 if (maxprec <= initprec) return -1;
6959
6960 i = 0; k = maxprec;
6961 do {
6962 k = (k+1) / 2;
6963 klist[i++] = k;
6964 } while (k > initprec);
6965
6966 return i-1;
6967}
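
/*
 * Example (informal): for maxprec = 1000 and initprec = 16 the schedule is
 * klist = {500, 250, 125, 63, 32, 16} and the return value is 5. The
 * Newton iteration below walks the list backwards, roughly doubling the
 * working precision in each step until maxprec is reached.
 */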
6968
6969/*
6970 * Initial approximation for the reciprocal:
6971 * k_0 := MPD_RDIGITS-2
6972 * z_0 := 10**(-k_0) * floor(10**(2*k_0 + 2) / floor(v * 10**(k_0 + 2)))
6973 * Absolute error:
6974 * |1/v - z_0| < 10**(-k_0)
6975 * ACL2 proof: maxerror-inverse-approx
6976 */
6977static void
6978_mpd_qreciprocal_approx(mpd_t *z, const mpd_t *v, uint32_t *status)
6979{
6980 mpd_uint_t p10data[2] = {0, mpd_pow10[MPD_RDIGITS-2]};
6981 mpd_uint_t dummy, word;
6982 int n;
6983
6984 assert(v->exp == -v->digits);
6985
6986 _mpd_get_msdigits(&dummy, &word, v, MPD_RDIGITS);
6987 n = mpd_word_digits(word);
6988 word *= mpd_pow10[MPD_RDIGITS-n];
6989
6990 mpd_qresize(z, 2, status);
6991 (void)_mpd_shortdiv(z->data, p10data, 2, word);
6992
6993 mpd_clear_flags(z);
6994 z->exp = -(MPD_RDIGITS-2);
6995 z->len = (z->data[1] == 0) ? 1 : 2;
6996 mpd_setdigits(z);
6997}
6998
6999/*
7000 * Reciprocal, calculated with Newton's Method. Assumption: result != a.
7001 * NOTE: The comments in the function show that certain operations are
7002 * exact. The proof for the maximum error is too long to fit in here.
7003 * ACL2 proof: maxerror-inverse-complete
7004 */
7005static void
7006_mpd_qreciprocal(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7007 uint32_t *status)
7008{
7009 mpd_context_t varcontext, maxcontext;
7010 mpd_t *z = result; /* current approximation */
7011 mpd_t *v; /* a, normalized to a number between 0.1 and 1 */
7012 MPD_NEW_SHARED(vtmp, a); /* v shares data with a */
7013 MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
7014 MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
7015 MPD_NEW_CONST(two,0,0,1,1,1,2); /* const 2 */
7016 mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
7017 mpd_ssize_t adj, maxprec, initprec;
7018 uint8_t sign = mpd_sign(a);
7019 int i;
7020
7021 assert(result != a);
7022
7023 v = &vtmp;
7024 mpd_clear_flags(v);
7025 adj = v->digits + v->exp;
7026 v->exp = -v->digits;
7027
7028 /* Initial approximation */
7029 _mpd_qreciprocal_approx(z, v, status);
7030
7031 mpd_maxcontext(&varcontext);
7032 mpd_maxcontext(&maxcontext);
7033 varcontext.round = maxcontext.round = MPD_ROUND_TRUNC;
7034 varcontext.emax = maxcontext.emax = MPD_MAX_EMAX + 100;
7035 varcontext.emin = maxcontext.emin = MPD_MIN_EMIN - 100;
7036 maxcontext.prec = MPD_MAX_PREC + 100;
7037
7038 maxprec = ctx->prec;
7039 maxprec += 2;
7040 initprec = MPD_RDIGITS-3;
7041
7042 i = recpr_schedule_prec(klist, maxprec, initprec);
7043 for (; i >= 0; i--) {
7044 /* Loop invariant: z->digits <= klist[i]+7 */
7045 /* Let s := z**2, exact result */
7046 _mpd_qmul_exact(&s, z, z, &maxcontext, status);
7047 varcontext.prec = 2*klist[i] + 5;
7048 if (v->digits > varcontext.prec) {
7049 /* Let t := v, truncated to n >= 2*k+5 fraction digits */
7050 mpd_qshiftr(&t, v, v->digits-varcontext.prec, status);
7051 t.exp = -varcontext.prec;
7052 /* Let t := trunc(v)*s, truncated to n >= 2*k+1 fraction digits */
7053 mpd_qmul(&t, &t, &s, &varcontext, status);
7054 }
7055 else { /* v->digits <= 2*k+5 */
7056 /* Let t := v*s, truncated to n >= 2*k+1 fraction digits */
7057 mpd_qmul(&t, v, &s, &varcontext, status);
7058 }
7059 /* Let s := 2*z, exact result */
7060 _mpd_qmul_exact(&s, z, &two, &maxcontext, status);
7061 /* s.digits < t.digits <= 2*k+5, |adjexp(s)-adjexp(t)| <= 1,
7062 * so the subtraction generates at most 2*k+6 <= klist[i+1]+7
7063 * digits. The loop invariant is preserved. */
7064 _mpd_qsub_exact(z, &s, &t, &maxcontext, status);
7065 }
7066
7067 if (!mpd_isspecial(z)) {
7068 z->exp -= adj;
7069 mpd_set_flags(z, sign);
7070 }
7071
7072 mpd_del(&s);
7073 mpd_del(&t);
7074 mpd_qfinalize(z, ctx, status);
7075}
7076
7077/*
7078 * Internal function for large numbers:
7079 *
7080 * q, r = divmod(coeff(a), coeff(b))
7081 *
7082 * Strategy: Multiply the dividend by the reciprocal of the divisor. The
7083 * inexact result is fixed by a small loop, using at most one iteration.
7084 *
7085 * ACL2 proofs:
7086 * ------------
7087 * 1) q is a natural number. (ndivmod-quotient-natp)
7088 * 2) r is a natural number. (ndivmod-remainder-natp)
7089 * 3) a = q * b + r (ndivmod-q*b+r==a)
7090 * 4) r < b (ndivmod-remainder-<-b)
7091 */
7092static void
7093_mpd_base_ndivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
7094 uint32_t *status)
7095{
7096 mpd_context_t workctx;
7097 mpd_t *qq = q, *rr = r;
7098 mpd_t aa, bb;
7099 int k;
7100
7101 _mpd_copy_shared(&aa, a);
7102 _mpd_copy_shared(&bb, b);
7103
7104 mpd_set_positive(&aa);
7105 mpd_set_positive(&bb);
7106 aa.exp = 0;
7107 bb.exp = 0;
7108
7109 if (q == a || q == b) {
7110 if ((qq = mpd_qnew()) == NULL) {
7111 *status |= MPD_Malloc_error;
7112 goto nanresult;
7113 }
7114 }
7115 if (r == a || r == b) {
7116 if ((rr = mpd_qnew()) == NULL) {
7117 *status |= MPD_Malloc_error;
7118 goto nanresult;
7119 }
7120 }
7121
7122 mpd_maxcontext(&workctx);
7123
7124 /* Let prec := adigits - bdigits + 4 */
7125 workctx.prec = a->digits - b->digits + 1 + 3;
7126 if (a->digits > MPD_MAX_PREC || workctx.prec > MPD_MAX_PREC) {
7127 *status |= MPD_Division_impossible;
7128 goto nanresult;
7129 }
7130
7131 /* Let x := _mpd_qreciprocal(b, prec)
7132 * Then x is bounded by:
7133 * 1) 1/b - 10**(-prec - bdigits) < x < 1/b + 10**(-prec - bdigits)
7134 * 2) 1/b - 10**(-adigits - 4) < x < 1/b + 10**(-adigits - 4)
7135 */
7136 _mpd_qreciprocal(rr, &bb, &workctx, &workctx.status);
7137
7138 /* Get an estimate for the quotient. Let q := a * x
7139 * Then q is bounded by:
7140 * 3) a/b - 10**-4 < q < a/b + 10**-4
7141 */
7142 _mpd_qmul(qq, &aa, rr, &workctx, &workctx.status);
7143 /* Truncate q to an integer:
7144 * 4) a/b - 2 < trunc(q) < a/b + 1
7145 */
7146 mpd_qtrunc(qq, qq, &workctx, &workctx.status);
7147
7148 workctx.prec = aa.digits + 3;
7149 workctx.emax = MPD_MAX_EMAX + 3;
7150 workctx.emin = MPD_MIN_EMIN - 3;
7151 /* Multiply the estimate for q by b:
7152 * 5) a - 2 * b < trunc(q) * b < a + b
7153 */
7154 _mpd_qmul(rr, &bb, qq, &workctx, &workctx.status);
7155 /* Get the estimate for r such that a = q * b + r. */
7156 _mpd_qsub_exact(rr, &aa, rr, &workctx, &workctx.status);
7157
7158 /* Fix the result. At this point -b < r < 2*b, so the correction loop
7159 takes at most one iteration. */
7160 for (k = 0;; k++) {
7161 if (mpd_isspecial(qq) || mpd_isspecial(rr)) {
7162 *status |= (workctx.status&MPD_Errors);
7163 goto nanresult;
7164 }
7165 if (k > 2) { /* Allow two iterations despite the proof. */
7166 mpd_err_warn("libmpdec: internal error in " /* GCOV_NOT_REACHED */
7167 "_mpd_base_ndivmod: please report"); /* GCOV_NOT_REACHED */
7168 *status |= MPD_Invalid_operation; /* GCOV_NOT_REACHED */
7169 goto nanresult; /* GCOV_NOT_REACHED */
7170 }
7171 /* r < 0 */
7172 else if (_mpd_cmp(&zero, rr) == 1) {
7173 _mpd_qadd_exact(rr, rr, &bb, &workctx, &workctx.status);
7174 _mpd_qadd_exact(qq, qq, &minus_one, &workctx, &workctx.status);
7175 }
7176 /* 0 <= r < b */
7177 else if (_mpd_cmp(rr, &bb) == -1) {
7178 break;
7179 }
7180 /* r >= b */
7181 else {
7182 _mpd_qsub_exact(rr, rr, &bb, &workctx, &workctx.status);
7183 _mpd_qadd_exact(qq, qq, &one, &workctx, &workctx.status);
7184 }
7185 }
7186
7187 if (qq != q) {
7188 if (!mpd_qcopy(q, qq, status)) {
7189 goto nanresult; /* GCOV_UNLIKELY */
7190 }
7191 mpd_del(qq);
7192 }
7193 if (rr != r) {
7194 if (!mpd_qcopy(r, rr, status)) {
7195 goto nanresult; /* GCOV_UNLIKELY */
7196 }
7197 mpd_del(rr);
7198 }
7199
7200 *status |= (workctx.status&MPD_Errors);
7201 return;
7202
7203
7204nanresult:
7205 if (qq && qq != q) mpd_del(qq);
7206 if (rr && rr != r) mpd_del(rr);
7207 mpd_setspecial(q, MPD_POS, MPD_NAN);
7208 mpd_setspecial(r, MPD_POS, MPD_NAN);
7209}
7210
7211static inline int
7212invroot_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
7213 mpd_ssize_t maxprec, mpd_ssize_t initprec)
7214{
7215 mpd_ssize_t k;
7216 int i;
7217
7218 assert(maxprec >= 3 && initprec >= 3);
7219 if (maxprec <= initprec) return -1;
7220
7221 i = 0; k = maxprec;
7222 do {
7223 k = (k+3) / 2;
7224 klist[i++] = k;
7225 } while (k > initprec);
7226
7227 return i-1;
7228}
7229
7230/*
7231 * Initial approximation for the inverse square root.
7232 *
7233 * Input:
7234 * v := 7 or 8 decimal digits with an implicit exponent of 10**-6,
7235 * representing a number 1 <= x < 100.
7236 *
7237 * Output:
7238 * An approximation to 1/sqrt(v)
7239 */
7240static inline void
7241_invroot_init_approx(mpd_t *z, mpd_uint_t v)
7242{
7243 mpd_uint_t lo = 1000;
7244 mpd_uint_t hi = 10000;
7245 mpd_uint_t a, sq;
7246
7247 assert(v >= lo*lo && v < (hi+1)*(hi+1));
7248
7249 for(;;) {
7250 a = (lo + hi) / 2;
7251 sq = a * a;
7252 if (v >= sq) {
7253 if (v < sq + 2*a + 1) {
7254 break;
7255 }
7256 lo = a + 1;
7257 }
7258 else {
7259 hi = a - 1;
7260 }
7261 }
7262
7263 /* At this point a/1000 is an approximation to sqrt(v). */
7264 mpd_minalloc(z);
7265 mpd_clear_flags(z);
7266 z->data[0] = 1000000000UL / a;
7267 z->len = 1;
7268 z->exp = -6;
7269 mpd_setdigits(z);
7270}
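
/*
 * Example (informal): for v = 2000000 (i.e. 2.000000) the bisection stops
 * at a = 1414, since 1414**2 = 1999396 <= v < 1415**2. The initial
 * approximation is then z = (1000000000/1414) * 10**-6 = 0.707213, close
 * to 1/sqrt(2) = 0.70710678... .
 */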
7271
7272static void
7273_mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7274 uint32_t *status)
7275{
7276 uint32_t workstatus = 0;
7277 mpd_context_t varcontext, maxcontext;
7278 mpd_t *z = result; /* current approximation */
7279 mpd_t *v; /* a, normalized to a number between 1 and 100 */
7280 MPD_NEW_SHARED(vtmp, a); /* by default v will share data with a */
7281 MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
7282 MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
7283 MPD_NEW_CONST(one_half,0,-1,1,1,1,5);
7284 MPD_NEW_CONST(three,0,0,1,1,1,3);
7285 mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
7286 mpd_ssize_t ideal_exp, shift;
7287 mpd_ssize_t adj, tz;
7288 mpd_ssize_t maxprec, fracdigits;
7289 mpd_uint_t x, dummy;
7290 int i, n;
7291
7292
7293 ideal_exp = -(a->exp - (a->exp & 1)) / 2;
7294
7295 v = &vtmp;
7296 if (result == a) {
7297 if ((v = mpd_qncopy(a)) == NULL) {
7298 mpd_seterror(result, MPD_Malloc_error, status);
7299 return;
7300 }
7301 }
7302
7303 /* normalize a to 1 <= v < 100 */
7304 if ((v->digits+v->exp) & 1) {
7305 fracdigits = v->digits - 1;
7306 v->exp = -fracdigits;
7307 n = (v->digits > 7) ? 7 : (int)v->digits;
7308 _mpd_get_msdigits(&dummy, &x, v, n);
7309 if (n < 7) {
7310 x *= mpd_pow10[7-n];
7311 }
7312 }
7313 else {
7314 fracdigits = v->digits - 2;
7315 v->exp = -fracdigits;
7316 n = (v->digits > 8) ? 8 : (int)v->digits;
7317 _mpd_get_msdigits(&dummy, &x, v, n);
7318 if (n < 8) {
7319 x *= mpd_pow10[8-n];
7320 }
7321 }
7322 adj = (a->exp-v->exp) / 2;
7323
7324 /* initial approximation */
7325 _invroot_init_approx(z, x);
7326
7327 mpd_maxcontext(&maxcontext);
7328 mpd_maxcontext(&varcontext);
7329 varcontext.round = MPD_ROUND_TRUNC;
7330 maxprec = ctx->prec + 2;
7331
7332 i = invroot_schedule_prec(klist, maxprec, 3);
7333 for (; i >= 0; i--) {
7334 varcontext.prec = 2*klist[i]+2;
7335 mpd_qmul(&s, z, z, &maxcontext, &workstatus);
7336 if (v->digits > varcontext.prec) {
7337 shift = v->digits - varcontext.prec;
7338 mpd_qshiftr(&t, v, shift, &workstatus);
7339 t.exp += shift;
7340 mpd_qmul(&t, &t, &s, &varcontext, &workstatus);
7341 }
7342 else {
7343 mpd_qmul(&t, v, &s, &varcontext, &workstatus);
7344 }
7345 mpd_qsub(&t, &three, &t, &maxcontext, &workstatus);
7346 mpd_qmul(z, z, &t, &varcontext, &workstatus);
7347 mpd_qmul(z, z, &one_half, &maxcontext, &workstatus);
7348 }
7349
7350 z->exp -= adj;
7351
7352 tz = mpd_trail_zeros(result);
7353 shift = ideal_exp - result->exp;
7354 shift = (tz > shift) ? shift : tz;
7355 if (shift > 0) {
7356 mpd_qshiftr_inplace(result, shift);
7357 result->exp += shift;
7358 }
7359
7360
7361 mpd_del(&s);
7362 mpd_del(&t);
7363 if (v != &vtmp) mpd_del(v);
7364 *status |= (workstatus&MPD_Errors);
7365 varcontext = *ctx;
7366 varcontext.round = MPD_ROUND_HALF_EVEN;
7367 mpd_qfinalize(result, &varcontext, status);
7368}
7369
7370void
7371mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7372 uint32_t *status)
7373{
7374
7375 if (mpd_isspecial(a)) {
7376 if (mpd_qcheck_nan(result, a, ctx, status)) {
7377 return;
7378 }
7379 if (mpd_isnegative(a)) {
7380 mpd_seterror(result, MPD_Invalid_operation, status);
7381 return;
7382 }
7383 /* positive infinity */
7384 _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
7385 *status |= MPD_Clamped;
7386 return;
7387 }
7388 if (mpd_iszero(a)) {
7389 mpd_setspecial(result, mpd_sign(a), MPD_INF);
7390 *status |= MPD_Division_by_zero;
7391 return;
7392 }
7393 if (mpd_isnegative(a)) {
7394 mpd_seterror(result, MPD_Invalid_operation, status);
7395 return;
7396 }
7397
7398 _mpd_qinvroot(result, a, ctx, status);
7399}
7400
7401/*
7402 * Ensure correct rounding. Algorithm after Hull & Abrham, "Properly Rounded
7403 * Variable Precision Square Root", ACM Transactions on Mathematical Software,
7404 * Vol. 11, No. 3.
7405 */
7406static void
7407_mpd_fix_sqrt(mpd_t *result, const mpd_t *a, mpd_t *tmp,
7408 const mpd_context_t *ctx, uint32_t *status)
7409{
7410 mpd_context_t maxctx;
7411 MPD_NEW_CONST(u,0,0,1,1,1,5);
7412
7413 mpd_maxcontext(&maxctx);
7414 u.exp = u.digits - ctx->prec + result->exp - 1;
7415
7416 _mpd_qsub(tmp, result, &u, &maxctx, status);
7417 if (*status&MPD_Errors) goto nanresult;
7418
7419 _mpd_qmul(tmp, tmp, tmp, &maxctx, status);
7420 if (*status&MPD_Errors) goto nanresult;
7421
7422 if (_mpd_cmp(tmp, a) == 1) {
7423 u.exp += 1;
7424 u.data[0] = 1;
7425 _mpd_qsub(result, result, &u, &maxctx, status);
7426 }
7427 else {
7428 _mpd_qadd(tmp, result, &u, &maxctx, status);
7429 if (*status&MPD_Errors) goto nanresult;
7430
7431 _mpd_qmul(tmp, tmp, tmp, &maxctx, status);
7432 if (*status&MPD_Errors) goto nanresult;
7433
7434 if (_mpd_cmp(tmp, a) == -1) {
7435 u.exp += 1;
7436 u.data[0] = 1;
7437 _mpd_qadd(result, result, &u, &maxctx, status);
7438 }
7439 }
7440
7441 return;
7442
7443nanresult:
7444 mpd_setspecial(result, MPD_POS, MPD_NAN);
7445}
7446
7447void
7448mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
7449 uint32_t *status)
7450{
7451 uint32_t workstatus = 0;
7452 mpd_context_t varcontext;
7453 mpd_t *z = result; /* current approximation */
7454 MPD_NEW_STATIC(v,0,0,0,0); /* a, normalized to a number between 1 and 10 */
7455 MPD_NEW_STATIC(vtmp,0,0,0,0);
7456 MPD_NEW_STATIC(tmp,0,0,0,0);
7457 mpd_ssize_t ideal_exp, shift;
7458 mpd_ssize_t target_prec, fracdigits;
7459 mpd_ssize_t a_exp, a_digits;
7460 mpd_ssize_t adj, tz;
7461 mpd_uint_t dummy, t;
7462 int exact = 0;
7463
7464
7465 varcontext = *ctx;
7466 varcontext.round = MPD_ROUND_HALF_EVEN;
7467 ideal_exp = (a->exp - (a->exp & 1)) / 2;
7468
7469 if (mpd_isspecial(a)) {
7470 if (mpd_qcheck_nan(result, a, ctx, status)) {
7471 return;
7472 }
7473 if (mpd_isnegative(a)) {
7474 mpd_seterror(result, MPD_Invalid_operation, status);
7475 return;
7476 }
7477 mpd_setspecial(result, MPD_POS, MPD_INF);
7478 return;
7479 }
7480 if (mpd_iszero(a)) {
7481 _settriple(result, mpd_sign(a), 0, ideal_exp);
7482 mpd_qfinalize(result, ctx, status);
7483 return;
7484 }
7485 if (mpd_isnegative(a)) {
7486 mpd_seterror(result, MPD_Invalid_operation, status);
7487 return;
7488 }
7489
7490 if (!mpd_qcopy(&v, a, status)) {
7491 mpd_seterror(result, MPD_Malloc_error, status);
7492 goto finish;
7493 }
7494
7495 a_exp = a->exp;
7496 a_digits = a->digits;
7497
7498 /* normalize a to 1 <= v < 100 */
7499 if ((v.digits+v.exp) & 1) {
7500 fracdigits = v.digits - 1;
7501 v.exp = -fracdigits;
7502 _mpd_get_msdigits(&dummy, &t, &v, 3);
7503 t = t < 100 ? t*10 : t;
7504 t = t < 100 ? t*10 : t;
7505 }
7506 else {
7507 fracdigits = v.digits - 2;
7508 v.exp = -fracdigits;
7509 _mpd_get_msdigits(&dummy, &t, &v, 4);
7510 t = t < 1000 ? t*10 : t;
7511 t = t < 1000 ? t*10 : t;
7512 t = t < 1000 ? t*10 : t;
7513 }
7514 adj = (a_exp-v.exp) / 2;
7515
7516
7517 /* use excess digits */
7518 target_prec = (a_digits > ctx->prec) ? a_digits : ctx->prec;
7519 target_prec += 2;
7520 varcontext.prec = target_prec + 3;
7521
7522 /* invroot is much faster for large numbers */
7523 _mpd_qinvroot(&tmp, &v, &varcontext, &workstatus);
7524
7525 varcontext.prec = target_prec;
7526 _mpd_qdiv(NO_IDEAL_EXP, z, &one, &tmp, &varcontext, &workstatus);
7527
7528
7529 tz = mpd_trail_zeros(result);
7530 if ((result->digits-tz)*2-1 <= v.digits) {
7531 _mpd_qmul(&tmp, result, result, &varcontext, &workstatus);
7532 if (workstatus&MPD_Errors) {
7533 mpd_seterror(result, workstatus&MPD_Errors, status);
7534 goto finish;
7535 }
7536 exact = (_mpd_cmp(&tmp, &v) == 0);
7537 }
7538 *status |= (workstatus&MPD_Errors);
7539
7540 if (!exact && !mpd_isspecial(result) && !mpd_iszero(result)) {
7541 _mpd_fix_sqrt(result, &v, &tmp, &varcontext, status);
7542 if (mpd_isspecial(result)) goto finish;
7543 *status |= (MPD_Rounded|MPD_Inexact);
7544 }
7545
7546 result->exp += adj;
7547 if (exact) {
7548 shift = ideal_exp - result->exp;
7549 shift = (tz > shift) ? shift : tz;
7550 if (shift > 0) {
7551 mpd_qshiftr_inplace(result, shift);
7552 result->exp += shift;
7553 }
7554 }
7555
7556
7557finish:
7558 mpd_del(&v);
7559 mpd_del(&vtmp);
7560 mpd_del(&tmp);
7561 varcontext.prec = ctx->prec;
7562 mpd_qfinalize(result, &varcontext, status);
7563}
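
/*
 * Examples (informal, taken from the specification): the result exponent
 * is driven towards ideal_exp = floor(exp/2) whenever the result is exact:
 *
 *   sqrt(100)   ->  10
 *   sqrt(1.00)  ->  1.0
 *   sqrt(0.01)  ->  0.1
 *   sqrt(7)     ->  2.64575131   (prec = 9, inexact and rounded)
 */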
7564
7565
7566/******************************************************************************/
7567/* Base conversions */
7568/******************************************************************************/
7569
7570/*
7571 * Returns the space needed to represent an integer mpd_t in base 'base'.
7572 * The result is undefined for non-integers.
7573 *
7574 * Max space needed:
7575 *
7576 * base^n >= 10^(digits+exp)
7577 * n >= log10(10^(digits+exp))/log10(base) = (digits+exp) / log10(base)
7578 */
7579size_t
7580mpd_sizeinbase(mpd_t *a, uint32_t base)
7581{
7582 size_t x;
7583
7584 assert(mpd_isinteger(a));
7585 if (mpd_iszero(a)) {
7586 return 1;
7587 }
7588
7589 x = a->digits+a->exp;
7590
7591#ifdef CONFIG_64
7592 #ifdef USE_80BIT_LONG_DOUBLE
7593 return (long double)x / log10(base) + 3;
7594 #else
7595 /* x > floor(((1ULL<<53)-3) * log10(2)) */
7596 if (x > 2711437152599294ULL) {
7597 return SIZE_MAX;
7598 }
7599 return (size_t)((double)x / log10(base) + 3);
7600 #endif
7601#else /* CONFIG_32 */
7602{
7603 double y = x / log10(base) + 3;
7604 return (y > SIZE_MAX) ? SIZE_MAX : (size_t)y;
7605}
7606#endif
7607}
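
/*
 * Example (informal): for a 100-digit integer (digits+exp = 100) and
 * base = 2, the estimate is 100/log10(2) + 3 = 332.19... + 3, so 335 is
 * returned; the exact requirement is 333 binary digits, i.e. the estimate
 * includes a small amount of slack.
 */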
7608
7609/*
7610 * Returns the space needed to import a base 'base' integer of length 'srclen'.
7611 */
7612static inline mpd_ssize_t
7613_mpd_importsize(size_t srclen, uint32_t base)
7614{
7615#if SIZE_MAX == UINT64_MAX
7616 #ifdef USE_80BIT_LONG_DOUBLE
7617 long double x = (long double)srclen * (log10(base)/MPD_RDIGITS) + 3;
7618 #else
7619 double x;
7620 if (srclen > (1ULL<<53)) {
7621 return MPD_SSIZE_MAX;
7622 }
7623 x = (double)srclen * (log10(base)/MPD_RDIGITS) + 3;
7624 #endif
7625#else
7626 double x = srclen * (log10(base)/MPD_RDIGITS) + 3;
7627#endif
7628 return (x > MPD_MAXIMPORT) ? MPD_SSIZE_MAX : (mpd_ssize_t)x;
7629}
7630
7631
7632static inline size_t
7633_to_base_u16(uint16_t *w, size_t wlen, mpd_uint_t wbase,
7634 mpd_uint_t *u, mpd_ssize_t ulen)
7635{
7636 size_t n = 0;
7637
7638 assert(wlen > 0 && ulen > 0);
7639
7640 do {
7641 w[n++] = (uint16_t)_mpd_shortdiv(u, u, ulen, wbase);
7642 /* ulen will be at least 1. u[ulen-1] can only be zero if ulen == 1 */
7643 ulen = _mpd_real_size(u, ulen);
7644
7645 } while (u[ulen-1] != 0 && n < wlen);
7646
7647 /* proper termination condition */
7648 assert(u[ulen-1] == 0);
7649
7650 return n;
7651}
7652
7653static inline void
7654_from_base_u16(mpd_uint_t *w, mpd_ssize_t wlen,
7655 const mpd_uint_t *u, size_t ulen, uint32_t ubase)
7656{
7657 mpd_ssize_t m = 1;
7658 mpd_uint_t carry;
7659
7660 assert(wlen > 0 && ulen > 0);
7661
7662 w[0] = u[--ulen];
7663 while (--ulen != SIZE_MAX && m < wlen) {
7664 _mpd_shortmul(w, w, m, ubase);
7665 m = _mpd_real_size(w, m+1);
7666 carry = _mpd_shortadd(w, m, u[ulen]);
7667 if (carry) w[m++] = carry;
7668 }
7669
7670 /* proper termination condition */
7671 assert(ulen == SIZE_MAX);
7672}

/* target base wbase <= source base ubase */
static inline size_t
_baseconv_to_smaller(uint32_t *w, size_t wlen, mpd_uint_t wbase,
                     mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase)
{
    size_t n = 0;

    assert(wlen > 0 && ulen > 0);

    do {
        w[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
        /* ulen will be at least 1. u[ulen-1] can only be zero if ulen == 1 */
        ulen = _mpd_real_size(u, ulen);

    } while (u[ulen-1] != 0 && n < wlen);

    /* proper termination condition */
    assert(u[ulen-1] == 0);

    return n;
}

/* target base wbase >= source base ubase */
static inline void
_baseconv_to_larger(mpd_uint_t *w, mpd_ssize_t wlen, mpd_uint_t wbase,
                    const mpd_uint_t *u, size_t ulen, mpd_uint_t ubase)
{
    mpd_ssize_t m = 1;
    mpd_uint_t carry;

    assert(wlen > 0 && ulen > 0);

    w[0] = u[--ulen];
    while (--ulen != SIZE_MAX && m < wlen) {
        _mpd_shortmul_b(w, w, m, ubase, wbase);
        m = _mpd_real_size(w, m+1);
        carry = _mpd_shortadd_b(w, m, u[ulen], wbase);
        if (carry) w[m++] = carry;
    }

    /* proper termination condition */
    assert(ulen == SIZE_MAX);
}
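
/*
 * Note (editorial): the two routines above generalize _to_base_u16() and
 * _from_base_u16() by taking the source base as an explicit argument and
 * forwarding both bases to the *_b primitives. The callers below
 * (mpd_qexport_u32()/mpd_qimport_u32()) pick whichever variant matches the
 * relative size of the source and target base.
 */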


/*
 * Converts an integer mpd_t to a multiprecision integer with
 * base <= UINT16_MAX+1. The least significant word of the result
 * is rdata[0].
 */
size_t
mpd_qexport_u16(uint16_t *rdata, size_t rlen, uint32_t rbase,
                const mpd_t *src, uint32_t *status)
{
    mpd_t *tsrc;
    size_t n;

    assert(rbase <= (1U<<16));
    assert(rlen <= SIZE_MAX/(sizeof *rdata));

    if (mpd_isspecial(src) || !_mpd_isint(src)) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }

    memset(rdata, 0, rlen * (sizeof *rdata));

    if (mpd_iszero(src)) {
        return 1;
    }

    if ((tsrc = mpd_qnew()) == NULL) {
        *status |= MPD_Malloc_error;
        return SIZE_MAX;
    }

    if (src->exp >= 0) {
        if (!mpd_qshiftl(tsrc, src, src->exp, status)) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }
    else {
        if (mpd_qshiftr(tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }

    n = _to_base_u16(rdata, rlen, rbase, tsrc->data, tsrc->len);

    mpd_del(tsrc);
    return n;
}
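
/*
 * Usage sketch (editorial note, not part of the original source). It assumes
 * the public mpdecimal.h helpers mpd_maxcontext() and mpd_qset_string():
 *
 *     mpd_context_t ctx;
 *     uint32_t status = 0;
 *     uint16_t rdata[64] = {0};
 *     size_t n;
 *     mpd_t *x = mpd_qnew();
 *
 *     mpd_maxcontext(&ctx);
 *     mpd_qset_string(x, "123456789", &ctx, &status);
 *     n = mpd_qexport_u16(rdata, 64, 0x10000, x, &status);
 *
 * On success n is the number of base-2**16 words written, least significant
 * in rdata[0]; a return value of SIZE_MAX indicates an error recorded in
 * 'status'. The caller frees x with mpd_del(x).
 */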

/*
 * Converts an integer mpd_t to a multiprecision integer with
 * base <= UINT32_MAX. The least significant word of the result
 * is rdata[0].
 */
size_t
mpd_qexport_u32(uint32_t *rdata, size_t rlen, uint32_t rbase,
                const mpd_t *src, uint32_t *status)
{
    mpd_t *tsrc;
    size_t n;

    if (mpd_isspecial(src) || !_mpd_isint(src)) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }
#if MPD_SIZE_MAX < SIZE_MAX
    if (rlen > MPD_SSIZE_MAX) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }
#endif

    assert(rlen <= SIZE_MAX/(sizeof *rdata));
    memset(rdata, 0, rlen * (sizeof *rdata));

    if (mpd_iszero(src)) {
        return 1;
    }

    if ((tsrc = mpd_qnew()) == NULL) {
        *status |= MPD_Malloc_error;
        return SIZE_MAX;
    }

    if (src->exp >= 0) {
        if (!mpd_qshiftl(tsrc, src, src->exp, status)) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }
    else {
        if (mpd_qshiftr(tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
            mpd_del(tsrc);
            return SIZE_MAX;
        }
    }

#ifdef CONFIG_64
    n = _baseconv_to_smaller(rdata, rlen, rbase,
                             tsrc->data, tsrc->len, MPD_RADIX);
#else
    if (rbase <= MPD_RADIX) {
        n = _baseconv_to_smaller(rdata, rlen, rbase,
                                 tsrc->data, tsrc->len, MPD_RADIX);
    }
    else {
        _baseconv_to_larger(rdata, (mpd_ssize_t)rlen, rbase,
                            tsrc->data, tsrc->len, MPD_RADIX);
        n = _mpd_real_size(rdata, (mpd_ssize_t)rlen);
    }
#endif

    mpd_del(tsrc);
    return n;
}
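
/*
 * Usage sketch (editorial note): sizing the output buffer with
 * mpd_sizeinbase() before exporting, using the mpd_alloc()/mpd_free()
 * wrappers that appear elsewhere in this file. 'x' is assumed to be an
 * integral, finite mpd_t:
 *
 *     uint32_t status = 0;
 *     size_t rlen = mpd_sizeinbase(x, UINT32_MAX);
 *     uint32_t *rdata = mpd_alloc(rlen, sizeof *rdata);
 *     size_t n = mpd_qexport_u32(rdata, rlen, UINT32_MAX, x, &status);
 *
 * rdata[0..n-1] then holds the base-(2**32 - 1) representation, least
 * significant word first; mpd_free(rdata) releases the buffer.
 */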


/*
 * Converts a multiprecision integer with base <= UINT16_MAX+1 to an mpd_t.
 * The least significant word of the source is srcdata[0].
 */
void
mpd_qimport_u16(mpd_t *result,
                const uint16_t *srcdata, size_t srclen,
                uint8_t srcsign, uint32_t srcbase,
                const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t *usrc; /* uint16_t src copied to an mpd_uint_t array */
    mpd_ssize_t rlen; /* length of the result */
    size_t n = 0;

    assert(srclen > 0);
    assert(srcbase <= (1U<<16));

    if ((rlen = _mpd_importsize(srclen, srcbase)) == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (srclen > MPD_SIZE_MAX/(sizeof *usrc)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if ((usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc)) == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }
    for (n = 0; n < srclen; n++) {
        usrc[n] = srcdata[n];
    }

    /* result->data is initialized to zero */
    if (!mpd_qresize_zero(result, rlen, status)) {
        goto finish;
    }

    _from_base_u16(result->data, rlen, usrc, srclen, srcbase);

    mpd_set_flags(result, srcsign);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, rlen);
    mpd_setdigits(result);

    mpd_qresize(result, result->len, status);
    mpd_qfinalize(result, ctx, status);


finish:
    mpd_free(usrc);
}
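
/*
 * Usage sketch (editorial note): rebuilding an mpd_t from base-2**16 words,
 * least significant word first, assuming mpd_maxcontext() from the public
 * API. A srcsign of 0 denotes a positive value:
 *
 *     static const uint16_t words[4] = {0xCDEF, 0x89AB, 0x4567, 0x0123};
 *     mpd_context_t ctx;
 *     uint32_t status = 0;
 *     mpd_t *r = mpd_qnew();
 *
 *     mpd_maxcontext(&ctx);
 *     mpd_qimport_u16(r, words, 4, 0, 0x10000, &ctx, &status);
 *
 * r now holds 0x0123456789ABCDEF == 81985529216486895.
 */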

/*
 * Converts a multiprecision integer with base <= UINT32_MAX to an mpd_t.
 * The least significant word of the source is srcdata[0].
 */
void
mpd_qimport_u32(mpd_t *result,
                const uint32_t *srcdata, size_t srclen,
                uint8_t srcsign, uint32_t srcbase,
                const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t *usrc; /* uint32_t src copied to an mpd_uint_t array */
    mpd_ssize_t rlen; /* length of the result */
    size_t n = 0;

    assert(srclen > 0);

    if ((rlen = _mpd_importsize(srclen, srcbase)) == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (srclen > MPD_SIZE_MAX/(sizeof *usrc)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if ((usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc)) == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }
    for (n = 0; n < srclen; n++) {
        usrc[n] = srcdata[n];
    }

    /* result->data is initialized to zero */
    if (!mpd_qresize_zero(result, rlen, status)) {
        goto finish;
    }

#ifdef CONFIG_64
    _baseconv_to_larger(result->data, rlen, MPD_RADIX,
                        usrc, srclen, srcbase);
#else
    if (srcbase <= MPD_RADIX) {
        _baseconv_to_larger(result->data, rlen, MPD_RADIX,
                            usrc, srclen, srcbase);
    }
    else {
        _baseconv_to_smaller(result->data, rlen, MPD_RADIX,
                             usrc, (mpd_ssize_t)srclen, srcbase);
    }
#endif

    mpd_set_flags(result, srcsign);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, rlen);
    mpd_setdigits(result);

    mpd_qresize(result, result->len, status);
    mpd_qfinalize(result, ctx, status);


finish:
    mpd_free(usrc);
}
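
/*
 * Round-trip sketch (editorial note): for a nonnegative integral 'x' that
 * fits the context, exporting and re-importing is expected to reproduce the
 * value. 'y' is a second mpd_t and 'ctx' a sufficiently large context:
 *
 *     uint32_t status = 0;
 *     uint32_t words[8] = {0};
 *     size_t n = mpd_qexport_u32(words, 8, UINT32_MAX, x, &status);
 *     if (n != SIZE_MAX) {
 *         mpd_qimport_u32(y, words, n, 0, UINT32_MAX, &ctx, &status);
 *     }
 *
 * Comparing x and y afterwards (e.g. with mpd_qcmp()) should report equality
 * as long as the export buffer was large enough.
 */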