// Copyright 2014 PDFium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Original code by Matt McCutchen, see the LICENSE file.

#include "BigUnsigned.hh"

// Memory management definitions have moved to the bottom of NumberlikeArray.hh.

// The templates used by these constructors and converters are at the bottom of
// BigUnsigned.hh.

BigUnsigned::BigUnsigned(unsigned long  x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(unsigned int   x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(unsigned short x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(         long  x) { initFromSignedPrimitive(x); }
BigUnsigned::BigUnsigned(         int   x) { initFromSignedPrimitive(x); }
BigUnsigned::BigUnsigned(         short x) { initFromSignedPrimitive(x); }

unsigned long  BigUnsigned::toUnsignedLong () const { return convertToPrimitive      <unsigned long >(); }
unsigned int   BigUnsigned::toUnsignedInt  () const { return convertToPrimitive      <unsigned int  >(); }
unsigned short BigUnsigned::toUnsignedShort() const { return convertToPrimitive      <unsigned short>(); }
long           BigUnsigned::toLong         () const { return convertToSignedPrimitive<         long >(); }
int            BigUnsigned::toInt          () const { return convertToSignedPrimitive<         int  >(); }
short          BigUnsigned::toShort        () const { return convertToSignedPrimitive<         short>(); }

// BIT/BLOCK ACCESSORS

void BigUnsigned::setBlock(Index i, Blk newBlock) {
  if (newBlock == 0) {
    if (i < len) {
      blk[i] = 0;
      zapLeadingZeros();
    }
    // If i >= len, no effect.
  } else {
    if (i >= len) {
      // The nonzero block extends the number.
      allocateAndCopy(i+1);
      // Zero any added blocks that we aren't setting.
      for (Index j = len; j < i; j++)
        blk[j] = 0;
      len = i+1;
    }
    blk[i] = newBlock;
  }
}
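
/* A sketch of the two branches above: starting from len == 2, setBlock(4, 7)
 * allocates room for 5 blocks, zeroes blocks 2 and 3, sets block 4 to 7, and
 * sets len to 5; a later setBlock(4, 0) zeroes block 4 and zapLeadingZeros()
 * shrinks len back to 2 (assuming block 1 is still nonzero). */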

/* Evidently the compiler wants BigUnsigned:: on the return type because, at
 * that point, it hasn't yet parsed the BigUnsigned:: on the name to get the
 * proper scope. */
BigUnsigned::Index BigUnsigned::bitLength() const {
  if (isZero())
    return 0;
  else {
    Blk leftmostBlock = getBlock(len - 1);
    Index leftmostBlockLen = 0;
    while (leftmostBlock != 0) {
      leftmostBlock >>= 1;
      leftmostBlockLen++;
    }
    return leftmostBlockLen + (len - 1) * N;
  }
}
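
/* For example, a BigUnsigned holding 5 (binary 101) has bitLength() == 3;
 * only zero has bitLength() == 0. */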

void BigUnsigned::setBit(Index bi, bool newBit) {
  Index blockI = bi / N;
  Blk block = getBlock(blockI), mask = Blk(1) << (bi % N);
  block = newBit ? (block | mask) : (block & ~mask);
  setBlock(blockI, block);
}

// COMPARISON
BigUnsigned::CmpRes BigUnsigned::compareTo(const BigUnsigned &x) const {
  // A bigger length implies a bigger number.
  if (len < x.len)
    return less;
  else if (len > x.len)
    return greater;
  else {
    // Compare blocks one by one from left to right.
    Index i = len;
    while (i > 0) {
      i--;
      if (blk[i] == x.blk[i])
        continue;
      else if (blk[i] > x.blk[i])
        return greater;
      else
        return less;
    }
    // If no blocks differed, the numbers are equal.
    return equal;
  }
}

// COPY-LESS OPERATIONS

/*
 * On most calls to copy-less operations, it's safe to read the inputs little by
 * little and write the outputs little by little. However, if one of the
 * inputs is coming from the same variable into which the output is to be
 * stored (an "aliased" call), we risk overwriting the input before we read it.
 * In this case, we first compute the result into a temporary BigUnsigned
 * variable and then copy it into the requested output variable *this.
 * Each put-here operation uses the DTRT_ALIASED macro (Do The Right Thing on
 * aliased calls) to generate code for this check.
 *
 * I adopted this approach on 2007.02.13 (see Assignment Operators in
 * BigUnsigned.hh). Before then, put-here operations rejected aliased calls
 * with an exception. I think doing the right thing is better.
 *
 * Some of the put-here operations can probably handle aliased calls safely
 * without the extra copy because (for example) they process blocks strictly
 * right-to-left. At some point I might determine which ones don't need the
 * copy, but my reasoning would need to be verified very carefully. For now
 * I'll leave in the copy.
 */
#define DTRT_ALIASED(cond, op) \
  if (cond) { \
    BigUnsigned tmpThis; \
    tmpThis.op; \
    *this = tmpThis; \
    return; \
  }

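/* For example, the aliased call `x.add(x, y)' hits the macro's check: the
 * result is first computed into `tmpThis' via `tmpThis.add(x, y)' and then
 * assigned back to x, so the input x is never overwritten mid-computation.
 * (Here x and y stand for arbitrary BigUnsigned values.) */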

void BigUnsigned::add(const BigUnsigned &a, const BigUnsigned &b) {
  DTRT_ALIASED(this == &a || this == &b, add(a, b));
  // If one argument is zero, copy the other.
  if (a.len == 0) {
    operator =(b);
    return;
  } else if (b.len == 0) {
    operator =(a);
    return;
  }
  // Some variables...
  // Carries in and out of an addition stage
  bool carryIn, carryOut;
  Blk temp;
  Index i;
  // a2 points to the longer input, b2 points to the shorter
  const BigUnsigned *a2, *b2;
  if (a.len >= b.len) {
    a2 = &a;
    b2 = &b;
  } else {
    a2 = &b;
    b2 = &a;
  }
  // Set preliminary length and make room in this BigUnsigned
  len = a2->len + 1;
  allocate(len);
  // For each block index that is present in both inputs...
  for (i = 0, carryIn = false; i < b2->len; i++) {
    // Add input blocks
    temp = a2->blk[i] + b2->blk[i];
    // If a rollover occurred, the result is less than either input.
    // This test is used many times in the BigUnsigned code.
    carryOut = (temp < a2->blk[i]);
    // If a carry was input, handle it
    if (carryIn) {
      temp++;
      carryOut |= (temp == 0);
    }
    blk[i] = temp; // Save the addition result
    carryIn = carryOut; // Pass the carry along
  }
  // If there is a carry left over, increase blocks until
  // one does not roll over.
  for (; i < a2->len && carryIn; i++) {
    temp = a2->blk[i] + 1;
    carryIn = (temp == 0);
    blk[i] = temp;
  }
  // If the carry was resolved but the larger number
  // still has blocks, copy them over.
  for (; i < a2->len; i++)
    blk[i] = a2->blk[i];
  // Set the extra block if there's still a carry, decrease length otherwise
  if (carryIn)
    blk[i] = 1;
  else
    len--;
}
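
/* A small worked example of the carry handling above, assuming 32-bit blocks
 * (N == 32): adding a = {0xFFFFFFFF} and b = {1} gives block 0 == 0, carryOut
 * is true because 0 < 0xFFFFFFFF, and the leftover carry sets the extra block
 * to 1, so the result is {0, 1}, i.e. 2^32. */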

void BigUnsigned::subtract(const BigUnsigned &a, const BigUnsigned &b) {
  DTRT_ALIASED(this == &a || this == &b, subtract(a, b));
  if (b.len == 0) {
    // If b is zero, copy a.
    operator =(a);
    return;
  } else if (a.len < b.len)
    // If a is shorter than b, the result is negative.
    abort();
  // Some variables...
  bool borrowIn, borrowOut;
  Blk temp;
  Index i;
  // Set preliminary length and make room
  len = a.len;
  allocate(len);
  // For each block index that is present in both inputs...
  for (i = 0, borrowIn = false; i < b.len; i++) {
    temp = a.blk[i] - b.blk[i];
    // If a reverse rollover occurred,
    // the result is greater than the block from a.
    borrowOut = (temp > a.blk[i]);
    // Handle an incoming borrow
    if (borrowIn) {
      borrowOut |= (temp == 0);
      temp--;
    }
    blk[i] = temp; // Save the subtraction result
    borrowIn = borrowOut; // Pass the borrow along
  }
  // If there is a borrow left over, decrease blocks until
  // one does not reverse rollover.
  for (; i < a.len && borrowIn; i++) {
    borrowIn = (a.blk[i] == 0);
    blk[i] = a.blk[i] - 1;
  }
  /* If there's still a borrow, the result is negative.
   * Abort, but zero out this object first so as to leave it in a
   * predictable state. */
  if (borrowIn) {
    len = 0;
    abort();
  } else
    // Copy over the rest of the blocks
    for (; i < a.len; i++)
      blk[i] = a.blk[i];
  // Zap leading zeros
  zapLeadingZeros();
}
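
/* A small worked example of the borrow handling above, again assuming 32-bit
 * blocks: subtracting b = {1} from a = {0, 1} (the value 2^32) reverse-rolls
 * block 0 over to 0xFFFFFFFF, the borrow clears block 1 to 0, and
 * zapLeadingZeros() leaves the single-block result {0xFFFFFFFF}. */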

/*
 * About the multiplication and division algorithms:
 *
 * I searched unsuccessfully for fast C++ built-in operations like the `b_0'
 * and `c_0' Knuth describes in Section 4.3.1 of ``The Art of Computer
 * Programming'' (replace `place' by `Blk'):
 *
 *    ``b_0[:] multiplication of a one-place integer by another one-place
 *      integer, giving a two-place answer;
 *
 *    ``c_0[:] division of a two-place integer by a one-place integer,
 *      provided that the quotient is a one-place integer, and yielding
 *      also a one-place remainder.''
 *
 * I also missed his note that ``[b]y adjusting the word size, if
 * necessary, nearly all computers will have these three operations
 * available'', so I gave up on trying to use algorithms similar to his.
 * A future version of the library might include such algorithms; I
 * would welcome contributions from others for this.
 *
 * I eventually decided to use bit-shifting algorithms. To multiply `a'
 * and `b', we zero out the result. Then, for each `1' bit in `a', we
 * shift `b' left the appropriate amount and add it to the result.
 * Similarly, to divide `a' by `b', we shift `b' left varying amounts,
 * repeatedly trying to subtract it from `a'. When we succeed, we note
 * the fact by setting a bit in the quotient. While these algorithms
 * have the same O(n^2) time complexity as Knuth's, the ``constant factor''
 * is likely to be larger.
 *
 * Because I used these algorithms, which require single-block addition
 * and subtraction rather than single-block multiplication and division,
 * the innermost loops of all four routines are very similar. Study one
 * of them and all will become clear.
 */
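
/* For instance, to multiply a = 5 (binary 101) by b = 6 with this scheme,
 * the result starts at 0; bit 0 of `a' adds `b << 0' == 6 and bit 2 adds
 * `b << 2' == 24, giving 30. Division runs the same idea backwards,
 * subtracting shifted copies of `b' whenever they fit. */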

/*
 * This is a little inline function used by both the multiplication
 * routine and the division routine.
 *
 * `getShiftedBlock' returns the `x'th block of `num << y'.
 * `y' may be anything from 0 to N - 1, and `x' may be anything from
 * 0 to `num.len'.
 *
 * Two things contribute to this block:
 *
 * (1) The `N - y' low bits of `num.blk[x]', shifted `y' bits left.
 *
 * (2) The `y' high bits of `num.blk[x-1]', shifted `N - y' bits right.
 *
 * But we must be careful if `x == 0' or `x == num.len', in
 * which case we should use 0 instead of (2) or (1), respectively.
 *
 * If `y == 0', then (2) contributes 0, as it should. However,
 * in some computer environments, for a reason I cannot understand,
 * `a >> b' means `a >> (b % N)'. This means `num.blk[x-1] >> (N - y)'
 * will return `num.blk[x-1]' instead of the desired 0 when `y == 0';
 * the test `y == 0' handles this case specially.
 */
inline BigUnsigned::Blk getShiftedBlock(const BigUnsigned &num,
    BigUnsigned::Index x, unsigned int y) {
  BigUnsigned::Blk part1 = (x == 0 || y == 0) ? 0 : (num.blk[x - 1] >> (BigUnsigned::N - y));
  BigUnsigned::Blk part2 = (x == num.len) ? 0 : (num.blk[x] << y);
  return part1 | part2;
}
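
/* A quick worked example, assuming 32-bit blocks: for num with blocks
 * {0x80000001, 0x00000003} (low block first), getShiftedBlock(num, 1, 1)
 * combines the top bit of block 0 (0x80000001 >> 31 == 1) with block 1
 * shifted left (0x00000003 << 1 == 6), yielding 7. */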

void BigUnsigned::multiply(const BigUnsigned &a, const BigUnsigned &b) {
  DTRT_ALIASED(this == &a || this == &b, multiply(a, b));
  // If either a or b is zero, set to zero.
  if (a.len == 0 || b.len == 0) {
    len = 0;
    return;
  }
  /*
   * Overall method:
   *
   * Set this = 0.
   * For each 1-bit of `a' (say the `i2'th bit of block `i'):
   *    Add `b << (i blocks and i2 bits)' to *this.
   */
  // Variables for the calculation
  Index i, j, k;
  unsigned int i2;
  Blk temp;
  bool carryIn, carryOut;
  // Set preliminary length and make room
  len = a.len + b.len;
  allocate(len);
  // Zero out this object
  for (i = 0; i < len; i++)
    blk[i] = 0;
  // For each block of the first number...
  for (i = 0; i < a.len; i++) {
    // For each 1-bit of that block...
    for (i2 = 0; i2 < N; i2++) {
      if ((a.blk[i] & (Blk(1) << i2)) == 0)
        continue;
      /*
       * Add b to this, shifted left i blocks and i2 bits.
       * j is the index in b, and k = i + j is the index in this.
       *
       * `getShiftedBlock', a short inline function defined above,
       * is now used for the bit handling. It replaces the more
       * complex `bHigh' code, in which each run of the loop dealt
       * immediately with the low bits and saved the high bits to
       * be picked up next time. The last run of the loop used to
       * leave leftover high bits, which were handled separately.
       * Instead, this loop runs an additional time with j == b.len.
       * These changes were made on 2005.01.11.
       */
      for (j = 0, k = i, carryIn = false; j <= b.len; j++, k++) {
        /*
         * The body of this loop is very similar to the body of the first loop
         * in `add', except that this loop does a `+=' instead of a `+'.
         */
        temp = blk[k] + getShiftedBlock(b, j, i2);
        carryOut = (temp < blk[k]);
        if (carryIn) {
          temp++;
          carryOut |= (temp == 0);
        }
        blk[k] = temp;
        carryIn = carryOut;
      }
      // No more extra iteration to deal with `bHigh'.
      // Roll-over a carry as necessary.
      for (; carryIn; k++) {
        blk[k]++;
        carryIn = (blk[k] == 0);
      }
    }
  }
  // Zap possible leading zero
  if (blk[len - 1] == 0)
    len--;
}

/*
 * DIVISION WITH REMAINDER
 * This monstrous function mods *this by the given divisor b while storing the
 * quotient in the given object q; at the end, *this contains the remainder.
 * The seemingly bizarre pattern of inputs and outputs was chosen so that the
 * function copies as little as possible (since it is implemented by repeated
 * subtraction of multiples of b from *this).
 *
 * "modWithQuotient" might be a better name for this function, but I would
 * rather not change the name now.
 */
void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
  /* Defending against aliased calls is more complex than usual because we
   * are writing to both *this and q.
   *
   * It would be silly to try to write quotient and remainder to the
   * same variable. Rule that out right away. */
  if (this == &q)
    abort();
  /* Now *this and q are separate, so the only concern is that b might be
   * aliased to one of them. If so, use a temporary copy of b. */
  if (this == &b || &q == &b) {
    BigUnsigned tmpB(b);
    divideWithRemainder(tmpB, q);
    return;
  }

  /*
   * Knuth's definition of mod (which this function uses) is somewhat
   * different from the C++ definition of % in case of division by 0.
   *
   * We let a / 0 == 0 (it doesn't matter much) and a % 0 == a, no
   * exceptions thrown. This allows us to preserve both Knuth's demand
   * that a mod 0 == a and the useful property that
   * (a / b) * b + (a % b) == a.
   */
  if (b.len == 0) {
    q.len = 0;
    return;
  }

  /*
   * If *this.len < b.len, then *this < b, and we can be sure that b doesn't go into
   * *this at all. The quotient is 0 and *this is already the remainder (so leave it alone).
   */
  if (len < b.len) {
    q.len = 0;
    return;
  }

  // At this point we know (*this).len >= b.len > 0. (Whew!)

  /*
   * Overall method:
   *
   * For each appropriate i and i2, decreasing:
   *    Subtract (b << (i blocks and i2 bits)) from *this, storing the
   *      result in subtractBuf.
   *    If the subtraction succeeds with a nonnegative result:
   *        Turn on bit i2 of block i of the quotient q.
   *        Copy subtractBuf back into *this.
   *    Otherwise bit i2 of block i remains off, and *this is unchanged.
   *
   * Eventually q will contain the entire quotient, and *this will
   * be left with the remainder.
   *
   * subtractBuf[x] corresponds to blk[x], not blk[x+i], since 2005.01.11.
   * But on a single iteration, we don't touch the i lowest blocks of blk
   * (and don't use those of subtractBuf) because these blocks are
   * unaffected by the subtraction: we are subtracting
   * (b << (i blocks and i2 bits)), which ends in at least `i' zero
   * blocks. */
  // Variables for the calculation
  Index i, j, k;
  unsigned int i2;
  Blk temp;
  bool borrowIn, borrowOut;

  /*
   * Make sure we have an extra zero block just past the value.
   *
   * When we attempt a subtraction, we might shift `b' so
   * its first block begins a few bits left of the dividend,
   * and then we'll try to compare these extra bits with
   * a nonexistent block to the left of the dividend. The
   * extra zero block ensures sensible behavior; we need
   * an extra block in `subtractBuf' for exactly the same reason.
   */
  Index origLen = len; // Save real length.
  /* To avoid an out-of-bounds access in case of reallocation, allocate
   * first and then increment the logical length. */
  allocateAndCopy(len + 1);
  len++;
  blk[origLen] = 0; // Zero the added block.

  // subtractBuf holds part of the result of a subtraction; see above.
  Blk *subtractBuf = new Blk[len];

  // Set preliminary length for quotient and make room
  q.len = origLen - b.len + 1;
  q.allocate(q.len);
  // Zero out the quotient
  for (i = 0; i < q.len; i++)
    q.blk[i] = 0;

  // For each possible left-shift of b in blocks...
  i = q.len;
  while (i > 0) {
    i--;
    // For each possible left-shift of b in bits...
    // (Remember, N is the number of bits in a Blk.)
    q.blk[i] = 0;
    i2 = N;
    while (i2 > 0) {
      i2--;
      /*
       * Subtract b, shifted left i blocks and i2 bits, from *this,
       * and store the answer in subtractBuf. In the for loop, `k == i + j'.
       *
       * Compare this to the middle section of `multiply'. They
       * are in many ways analogous. See especially the discussion
       * of `getShiftedBlock'.
       */
      for (j = 0, k = i, borrowIn = false; j <= b.len; j++, k++) {
        temp = blk[k] - getShiftedBlock(b, j, i2);
        borrowOut = (temp > blk[k]);
        if (borrowIn) {
          borrowOut |= (temp == 0);
          temp--;
        }
        // Since 2005.01.11, indices of `subtractBuf' directly match those of `blk', so use `k'.
        subtractBuf[k] = temp;
        borrowIn = borrowOut;
      }
      // No more extra iteration to deal with `bHigh'.
      // Roll-over a borrow as necessary.
      for (; k < origLen && borrowIn; k++) {
        borrowIn = (blk[k] == 0);
        subtractBuf[k] = blk[k] - 1;
      }
      /*
       * If the subtraction was performed successfully (!borrowIn),
       * set bit i2 in block i of the quotient.
       *
       * Then, copy the portion of subtractBuf filled by the subtraction
       * back to *this. This portion starts with block i and ends--
       * where? Not necessarily at block `i + b.len'! Well, we
       * increased k every time we saved a block into subtractBuf, so
       * the region of subtractBuf we copy is just [i, k).
       */
      if (!borrowIn) {
        q.blk[i] |= (Blk(1) << i2);
        while (k > i) {
          k--;
          blk[k] = subtractBuf[k];
        }
      }
    }
  }
  // Zap possible leading zero in quotient
  if (q.blk[q.len - 1] == 0)
    q.len--;
  // Zap any/all leading zeros in remainder
  zapLeadingZeros();
  // Deallocate subtractBuf.
  // (Thanks to Brad Spencer for noticing my accidental omission of this!)
  delete [] subtractBuf;
}
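
/* Typical usage (a sketch): given BigUnsigned values a and b with b nonzero,
 *
 *   BigUnsigned q, r(a);
 *   r.divideWithRemainder(b, q);
 *
 * leaves the quotient in q and the remainder in r, so that q * b + r == a. */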

/* BITWISE OPERATORS
 * These are straightforward blockwise operations except that they differ in
 * the output length and the necessity of zapLeadingZeros. */

void BigUnsigned::bitAnd(const BigUnsigned &a, const BigUnsigned &b) {
  DTRT_ALIASED(this == &a || this == &b, bitAnd(a, b));
  // The bitwise & can't be longer than either operand.
  len = (a.len >= b.len) ? b.len : a.len;
  allocate(len);
  Index i;
  for (i = 0; i < len; i++)
    blk[i] = a.blk[i] & b.blk[i];
  zapLeadingZeros();
}

void BigUnsigned::bitOr(const BigUnsigned &a, const BigUnsigned &b) {
  DTRT_ALIASED(this == &a || this == &b, bitOr(a, b));
  Index i;
  const BigUnsigned *a2, *b2;
  if (a.len >= b.len) {
    a2 = &a;
    b2 = &b;
  } else {
    a2 = &b;
    b2 = &a;
  }
  allocate(a2->len);
  for (i = 0; i < b2->len; i++)
    blk[i] = a2->blk[i] | b2->blk[i];
  for (; i < a2->len; i++)
    blk[i] = a2->blk[i];
  len = a2->len;
  // Doesn't need zapLeadingZeros.
}

void BigUnsigned::bitXor(const BigUnsigned &a, const BigUnsigned &b) {
  DTRT_ALIASED(this == &a || this == &b, bitXor(a, b));
  Index i;
  const BigUnsigned *a2, *b2;
  if (a.len >= b.len) {
    a2 = &a;
    b2 = &b;
  } else {
    a2 = &b;
    b2 = &a;
  }
  allocate(a2->len);
  for (i = 0; i < b2->len; i++)
    blk[i] = a2->blk[i] ^ b2->blk[i];
  for (; i < a2->len; i++)
    blk[i] = a2->blk[i];
  len = a2->len;
  zapLeadingZeros();
}

void BigUnsigned::bitShiftLeft(const BigUnsigned &a, int b) {
  DTRT_ALIASED(this == &a, bitShiftLeft(a, b));
  if (b < 0) {
    if (b << 1 == 0)
      abort();
    else {
      bitShiftRight(a, -b);
      return;
    }
  }
  Index shiftBlocks = b / N;
  unsigned int shiftBits = b % N;
  // + 1: room for high bits nudged left into another block
  len = a.len + shiftBlocks + 1;
  allocate(len);
  Index i, j;
  for (i = 0; i < shiftBlocks; i++)
    blk[i] = 0;
  for (j = 0, i = shiftBlocks; j <= a.len; j++, i++)
    blk[i] = getShiftedBlock(a, j, shiftBits);
  // Zap possible leading zero
  if (blk[len - 1] == 0)
    len--;
}

void BigUnsigned::bitShiftRight(const BigUnsigned &a, int b) {
  DTRT_ALIASED(this == &a, bitShiftRight(a, b));
  if (b < 0) {
    if (b << 1 == 0)
      abort();
    else {
      bitShiftLeft(a, -b);
      return;
    }
  }
  // This calculation is wacky, but expressing the shift as a left bit shift
  // within each block lets us use getShiftedBlock.
  Index rightShiftBlocks = (b + N - 1) / N;
  unsigned int leftShiftBits = N * rightShiftBlocks - b;
  // Now (N * rightShiftBlocks - leftShiftBits) == b
  // and 0 <= leftShiftBits < N.
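  // For example, with 32-bit blocks (N == 32) and b == 35:
  // rightShiftBlocks == 2 and leftShiftBits == 29, i.e. shifting right by
  // 35 bits is treated as shifting left by 29 bits within each block and
  // then dropping 2 whole blocks.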
  if (rightShiftBlocks >= a.len + 1) {
    // All of a is guaranteed to be shifted off, even considering the left
    // bit shift.
    len = 0;
    return;
  }
  // Now we're allocating a positive amount.
  // + 1: room for high bits nudged left into another block
  len = a.len + 1 - rightShiftBlocks;
  allocate(len);
  Index i, j;
  for (j = rightShiftBlocks, i = 0; j <= a.len; j++, i++)
    blk[i] = getShiftedBlock(a, j, leftShiftBits);
  // Zap possible leading zero
  if (blk[len - 1] == 0)
    len--;
}

// INCREMENT/DECREMENT OPERATORS

// Prefix increment
BigUnsigned& BigUnsigned::operator ++() {
  Index i;
  bool carry = true;
  for (i = 0; i < len && carry; i++) {
    blk[i]++;
    carry = (blk[i] == 0);
  }
  if (carry) {
    // Allocate and then increase length, as in divideWithRemainder
    allocateAndCopy(len + 1);
    len++;
    blk[i] = 1;
  }
  return *this;
}

// Postfix increment
BigUnsigned BigUnsigned::operator ++(int) {
  BigUnsigned temp(*this);
  operator ++();
  return temp;
}

// Prefix decrement
BigUnsigned& BigUnsigned::operator --() {
  if (len == 0)
    abort();
  Index i;
  bool borrow = true;
  for (i = 0; borrow; i++) {
    borrow = (blk[i] == 0);
    blk[i]--;
  }
  // Zap possible leading zero (there can only be one)
  if (blk[len - 1] == 0)
    len--;
  return *this;
}

// Postfix decrement
BigUnsigned BigUnsigned::operator --(int) {
  BigUnsigned temp(*this);
  operator --();
  return temp;
}