Push version 2.5.6 to trunk.
Added support for VFP rounding modes to the ARM simulator.
Fixed multiplication overflow bug (issue 927).
Added a limit for the amount of executable memory (issue 925).
git-svn-id: http://v8.googlecode.com/svn/trunk@5804 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
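
The new executable-memory limit (issue 925) is exposed through the existing v8::ResourceConstraints embedder API. A minimal sketch of embedder usage, assuming a set_max_executable_size() setter in v8.h alongside the max_executable_size() getter that api.cc reads below (sizes at this revision are raw byte counts):

    // Hedged sketch; set_max_executable_size() is assumed to accompany
    // the max_executable_size() getter used in api.cc below.
    #include <v8.h>

    static const int MB = 1024 * 1024;

    void ConfigureVM() {
      v8::ResourceConstraints constraints;
      constraints.set_max_old_space_size(64 * MB);   // bytes at this revision
      constraints.set_max_executable_size(32 * MB);  // new in this push
      v8::SetResourceConstraints(&constraints);
    }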
diff --git a/src/SConscript b/src/SConscript
index 596caf7..030c643 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -40,6 +40,7 @@
api.cc
assembler.cc
ast.cc
+ bignum.cc
bootstrapper.cc
builtins.cc
cached-powers.cc
diff --git a/src/api.cc b/src/api.cc
index ee7ad3a..9da3346 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -393,14 +393,18 @@
ResourceConstraints::ResourceConstraints()
: max_young_space_size_(0),
max_old_space_size_(0),
+ max_executable_size_(0),
stack_limit_(NULL) { }
bool SetResourceConstraints(ResourceConstraints* constraints) {
int young_space_size = constraints->max_young_space_size();
int old_gen_size = constraints->max_old_space_size();
- if (young_space_size != 0 || old_gen_size != 0) {
- bool result = i::Heap::ConfigureHeap(young_space_size / 2, old_gen_size);
+ int max_executable_size = constraints->max_executable_size();
+ if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
+ bool result = i::Heap::ConfigureHeap(young_space_size / 2,
+ old_gen_size,
+ max_executable_size);
if (!result) return false;
}
if (constraints->stack_limit() != NULL) {
@@ -3259,11 +3263,15 @@
}
-HeapStatistics::HeapStatistics(): total_heap_size_(0), used_heap_size_(0) { }
+HeapStatistics::HeapStatistics(): total_heap_size_(0),
+ total_heap_size_executable_(0),
+ used_heap_size_(0) { }
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->set_total_heap_size(i::Heap::CommittedMemory());
+ heap_statistics->set_total_heap_size_executable(
+ i::Heap::CommittedMemoryExecutable());
heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
}
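
With the executable-memory accounting in place, GetHeapStatistics reports how much of the committed heap is executable. A fragment sketching how an embedder might read it, assuming a total_heap_size_executable() getter mirroring the setter used above:

    v8::HeapStatistics stats;
    v8::V8::GetHeapStatistics(&stats);
    printf("committed %lu, executable %lu, used %lu\n",
           static_cast<unsigned long>(stats.total_heap_size()),
           static_cast<unsigned long>(stats.total_heap_size_executable()),
           static_cast<unsigned long>(stats.used_heap_size()));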
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 72835ba..4cb421c 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -2144,6 +2144,7 @@
const int dst_code,
const VFPType src_type,
const int src_code,
+ Assembler::ConversionMode mode,
const Condition cond) {
ASSERT(src_type != dst_type);
int D, Vd, M, Vm;
@@ -2162,7 +2163,7 @@
if (IsIntegerVFPType(dst_type)) {
opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- op = 1; // round towards zero
+ op = mode;
} else {
ASSERT(IsIntegerVFPType(src_type));
opc2 = 0x0;
@@ -2186,57 +2187,64 @@
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
+ emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
+ emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
+ emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
+ emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
+ emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
+ emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
+ ConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
+ emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2329,6 +2337,16 @@
}
+void Assembler::vmsr(Register dst, Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-652.
+ // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xE*B20 | B16 |
+ dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
@@ -2339,7 +2357,6 @@
}
-
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 0579235..de3931c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1008,26 +1008,37 @@
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
+ enum ConversionMode {
+ FPSCRRounding = 0,
+ RoundToZero = 1
+ };
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
+ ConversionMode mode = RoundToZero,
const Condition cond = al);
void vadd(const DwVfpRegister dst,
@@ -1056,6 +1067,8 @@
const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
+ void vmsr(const Register dst,
+ const Condition cond = al);
void vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
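
The new ConversionMode parameter defaults to RoundToZero, so all existing call sites keep their truncating behavior; passing FPSCRRounding instead makes vcvt honor whatever rounding mode the FPSCR currently selects (written via the new vmsr). Illustrative call sites:

    // Truncate (the previous, and still default, behavior; op bit = 1):
    __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero);
    // Defer to the rounding mode currently held in FPSCR (op bit = 0):
    __ vcvt_s32_f64(s0, d0, Assembler::FPSCRRounding);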
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 123c5e7..36f6283 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -206,6 +206,13 @@
kDoublePrecision = 1
};
+// VFP rounding modes. See ARM DDI 0406B Page A2-29.
+enum FPSCRRoundingModes {
+ RN, // Round to Nearest.
+ RP, // Round towards Plus Infinity.
+ RM, // Round towards Minus Infinity.
+ RZ // Round towards zero.
+};
typedef int32_t instr_t;
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a09afdf..4c1f983 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1988,9 +1988,9 @@
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, ne);
+ __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
} else {
- __ vcvt_u32_f64(s0, d0, ne);
+ __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
}
__ vmov(r5, s0, ne);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index cb91520..3ec5f44 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -705,6 +705,7 @@
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
+ FPSCR_rounding_mode_ = RZ;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
@@ -2501,10 +2502,45 @@
(instr->VAField() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// vmrs
- if (instr->RtField() == 0xF)
+ uint32_t rt = instr->RtField();
+ if (rt == 0xF) {
Copy_FPSCR_to_APSR();
- else
- UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ // Emulate FPSCR from the Simulator flags.
+ uint32_t fpscr = (n_flag_FPSCR_ << 31) |
+ (z_flag_FPSCR_ << 30) |
+ (c_flag_FPSCR_ << 29) |
+ (v_flag_FPSCR_ << 28) |
+ (inexact_vfp_flag_ << 4) |
+ (underflow_vfp_flag_ << 3) |
+ (overflow_vfp_flag_ << 2) |
+ (div_zero_vfp_flag_ << 1) |
+ (inv_op_vfp_flag_ << 0) |
+ (FPSCR_rounding_mode_ << 22);
+ set_register(rt, fpscr);
+ }
+ } else if ((instr->VLField() == 0x0) &&
+ (instr->VCField() == 0x0) &&
+ (instr->VAField() == 0x7) &&
+ (instr->Bits(19, 16) == 0x1)) {
+ // vmsr
+ uint32_t rt = instr->RtField();
+ if (rt == pc) {
+ UNREACHABLE();
+ } else {
+ uint32_t rt_value = get_register(rt);
+ n_flag_FPSCR_ = (rt_value >> 31) & 1;
+ z_flag_FPSCR_ = (rt_value >> 30) & 1;
+ c_flag_FPSCR_ = (rt_value >> 29) & 1;
+ v_flag_FPSCR_ = (rt_value >> 28) & 1;
+ inexact_vfp_flag_ = (rt_value >> 4) & 1;
+ underflow_vfp_flag_ = (rt_value >> 3) & 1;
+ overflow_vfp_flag_ = (rt_value >> 2) & 1;
+ div_zero_vfp_flag_ = (rt_value >> 1) & 1;
+ inv_op_vfp_flag_ = (rt_value >> 0) & 1;
+ FPSCR_rounding_mode_ =
+ static_cast<FPSCRRoundingModes>((rt_value >> 22) & 3);
+ }
} else {
UNIMPLEMENTED(); // Not used by V8.
}
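
The simulator packs its emulated FPSCR the way the hardware does: N/Z/C/V in bits 31..28, the RMode field in bits 23:22, and the cumulative exception flags in bits 4..0. A hypothetical code sequence that switches the rounding mode to round-towards-minus-infinity (RM = 0b10) through the vmrs/vmsr pair, leaving the other fields untouched:

    __ vmrs(r0);                        // r0 = FPSCR
    __ bic(r0, r0, Operand(3 << 22));   // clear the RMode field
    __ orr(r0, r0, Operand(RM << 22));  // select round towards minus infinity
    __ vmsr(r0);                        // FPSCR = r0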
@@ -2605,29 +2641,71 @@
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
+ FPSCRRoundingModes mode;
if (instr->Bit(7) != 1) {
- // Only rounding towards zero supported.
- UNIMPLEMENTED(); // Not used by V8.
+ // Use FPSCR defined rounding mode.
+ mode = FPSCR_rounding_mode_;
+ // Only RZ and RM modes are supported.
+ ASSERT((mode == RM) || (mode == RZ));
+ } else {
+ // Bit 7 set: the instruction rounds towards zero regardless of FPSCR.
+ mode = RZ;
}
int dst = instr->VFPDRegCode(kSinglePrecision);
int src = instr->VFPMRegCode(src_precision);
+ int32_t kMaxInt = v8::internal::kMaxInt;
+ int32_t kMinInt = v8::internal::kMinInt;
+ switch (mode) {
+ case RM:
+ if (src_precision == kDoublePrecision) {
+ double val = get_double_from_d_register(src);
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
+ inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
+ int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+ static_cast<int32_t>(val);
+ sint = sint > val ? sint - 1 : sint;
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
+ set_s_register_from_sinteger(dst, sint);
+ } else {
+ float val = get_float_from_s_register(src);
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
+ inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
- set_s_register_from_sinteger(dst, sint);
+ int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+ static_cast<int32_t>(val);
+ sint = sint > val ? sint - 1 : sint;
+
+ set_s_register_from_sinteger(dst, sint);
+ }
+ break;
+ case RZ:
+ if (src_precision == kDoublePrecision) {
+ double val = get_double_from_d_register(src);
+
+ inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+
+ int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+ static_cast<int32_t>(val);
+
+ set_s_register_from_sinteger(dst, sint);
+ } else {
+ float val = get_float_from_s_register(src);
+
+ inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+
+ int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+ static_cast<int32_t>(val);
+
+ set_s_register_from_sinteger(dst, sint);
+ }
+ break;
+
+ default:
+ UNREACHABLE();
}
+
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
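
In the RM case above, the simulator rounds towards minus infinity by truncating and then stepping down whenever truncation rounded up, which happens exactly for negative non-integral inputs. A standalone illustration of that adjustment:

    #include <cassert>

    int main() {
      double val = -2.7;
      int sint = static_cast<int>(val);     // truncates towards zero: -2
      sint = sint > val ? sint - 1 : sint;  // -2 > -2.7, so step down to -3
      assert(sint == -3);                   // floor(-2.7)
      return 0;
    }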
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3e02348..c37b3f7 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -306,6 +306,9 @@
bool c_flag_FPSCR_;
bool v_flag_FPSCR_;
+ // VFP rounding mode. See ARM DDI 0406B Page A2-29.
+ FPSCRRoundingModes FPSCR_rounding_mode_;
+
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
bool div_zero_vfp_flag_;
diff --git a/src/bignum.cc b/src/bignum.cc
new file mode 100644
index 0000000..dd1537a
--- /dev/null
+++ b/src/bignum.cc
@@ -0,0 +1,767 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bignum.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+Bignum::Bignum()
+ : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
+ for (int i = 0; i < kBigitCapacity; ++i) {
+ bigits_[i] = 0;
+ }
+}
+
+
+template<typename S>
+static int BitSize(S value) {
+ return 8 * sizeof(value);
+}
+
+// Guaranteed to lie in one Bigit.
+void Bignum::AssignUInt16(uint16_t value) {
+ ASSERT(kBigitSize >= BitSize(value));
+ Zero();
+ if (value == 0) return;
+
+ EnsureCapacity(1);
+ bigits_[0] = value;
+ used_digits_ = 1;
+}
+
+
+void Bignum::AssignUInt64(uint64_t value) {
+ const int kUInt64Size = 64;
+
+ Zero();
+ if (value == 0) return;
+
+ int needed_bigits = kUInt64Size / kBigitSize + 1;
+ EnsureCapacity(needed_bigits);
+ for (int i = 0; i < needed_bigits; ++i) {
+ bigits_[i] = value & kBigitMask;
+ value = value >> kBigitSize;
+ }
+ used_digits_ = needed_bigits;
+ Clamp();
+}
+
+
+void Bignum::AssignBignum(const Bignum& other) {
+ exponent_ = other.exponent_;
+ for (int i = 0; i < other.used_digits_; ++i) {
+ bigits_[i] = other.bigits_[i];
+ }
+ // Clear the excess digits (if there were any).
+ for (int i = other.used_digits_; i < used_digits_; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ = other.used_digits_;
+}
+
+
+static uint64_t ReadUInt64(Vector<const char> buffer,
+ int from,
+ int digits_to_read) {
+ uint64_t result = 0;
+ for (int i = from; i < from + digits_to_read; ++i) {
+ int digit = buffer[i] - '0';
+ ASSERT(0 <= digit && digit <= 9);
+ result = result * 10 + digit;
+ }
+ return result;
+}
+
+
+void Bignum::AssignDecimalString(Vector<const char> value) {
+ // 2^64 = 18446744073709551616 > 10^19
+ const int kMaxUint64DecimalDigits = 19;
+ Zero();
+ int length = value.length();
+ int pos = 0;
+ // Let's just say that each digit needs 4 bits.
+ while (length >= kMaxUint64DecimalDigits) {
+ uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
+ pos += kMaxUint64DecimalDigits;
+ length -= kMaxUint64DecimalDigits;
+ MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
+ AddUInt64(digits);
+ }
+ uint64_t digits = ReadUInt64(value, pos, length);
+ MultiplyByPowerOfTen(length);
+ AddUInt64(digits);
+ Clamp();
+}
+
+
+static int HexCharValue(char c) {
+ if ('0' <= c && c <= '9') return c - '0';
+ if ('a' <= c && c <= 'f') return 10 + c - 'a';
+ if ('A' <= c && c <= 'F') return 10 + c - 'A';
+ UNREACHABLE();
+ return 0; // To make compiler happy.
+}
+
+
+void Bignum::AssignHexString(Vector<const char> value) {
+ Zero();
+ int length = value.length();
+
+ int needed_bigits = length * 4 / kBigitSize + 1;
+ EnsureCapacity(needed_bigits);
+ int string_index = length - 1;
+ for (int i = 0; i < needed_bigits - 1; ++i) {
+ // These bigits are guaranteed to be "full".
+ Chunk current_bigit = 0;
+ for (int j = 0; j < kBigitSize / 4; j++) {
+ current_bigit += HexCharValue(value[string_index--]) << (j * 4);
+ }
+ bigits_[i] = current_bigit;
+ }
+ used_digits_ = needed_bigits - 1;
+
+ Chunk most_significant_bigit = 0; // May remain 0.
+ for (int j = 0; j <= string_index; ++j) {
+ most_significant_bigit <<= 4;
+ most_significant_bigit += HexCharValue(value[j]);
+ }
+ if (most_significant_bigit != 0) {
+ bigits_[used_digits_] = most_significant_bigit;
+ used_digits_++;
+ }
+ Clamp();
+}
+
+
+void Bignum::AddUInt64(uint64_t operand) {
+ if (operand == 0) return;
+ Bignum other;
+ other.AssignUInt64(operand);
+ AddBignum(other);
+}
+
+
+void Bignum::AddBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+
+ // If this has a greater exponent than other, append zero-bigits to this.
+ // After this call exponent_ <= other.exponent_.
+ Align(other);
+
+ // There are two possibilities:
+ // aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
+ // bbbbb 00000000
+ // ----------------
+ // ccccccccccc 0000
+ // or
+ // aaaaaaaaaa 0000
+ // bbbbbbbbb 0000000
+ // -----------------
+ // cccccccccccc 0000
+ // In both cases we might need a carry bigit.
+
+ EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
+ Chunk carry = 0;
+ int bigit_pos = other.exponent_ - exponent_;
+ ASSERT(bigit_pos >= 0);
+ for (int i = 0; i < other.used_digits_; ++i) {
+ Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
+ bigits_[bigit_pos] = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ bigit_pos++;
+ }
+
+ while (carry != 0) {
+ Chunk sum = bigits_[bigit_pos] + carry;
+ bigits_[bigit_pos] = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ bigit_pos++;
+ }
+ used_digits_ = Max(bigit_pos, used_digits_);
+ ASSERT(IsClamped());
+}
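
A worked instance of the addition path, using the internal API purely for illustration: two single-bigit values sum to 0xBE01, well under the 28-bit bigit mask, so the carry loop never fires.

    Bignum a, b;
    a.AssignUInt16(0x1234);
    b.AssignUInt16(0xABCD);
    a.AddBignum(b);                         // 0x1234 + 0xABCD = 0xBE01
    char buffer[16];
    a.ToHexString(buffer, sizeof(buffer));  // buffer now holds "BE01"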
+
+
+void Bignum::SubtractBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+ // We require this to be bigger than other.
+ ASSERT(LessEqual(other, *this));
+
+ Align(other);
+
+ int offset = other.exponent_ - exponent_;
+ Chunk borrow = 0;
+ int i;
+ for (i = 0; i < other.used_digits_; ++i) {
+ ASSERT((borrow == 0) || (borrow == 1));
+ Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
+ bigits_[i + offset] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ while (borrow != 0) {
+ Chunk difference = bigits_[i + offset] - borrow;
+ bigits_[i + offset] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ ++i;
+ }
+ Clamp();
+}
+
+
+void Bignum::ShiftLeft(int shift_amount) {
+ if (used_digits_ == 0) return;
+ exponent_ += shift_amount / kBigitSize;
+ int local_shift = shift_amount % kBigitSize;
+ EnsureCapacity(used_digits_ + 1);
+ BigitsShiftLeft(local_shift);
+}
+
+
+void Bignum::MultiplyByUInt32(uint32_t factor) {
+ if (factor == 1) return;
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ if (used_digits_ == 0) return;
+
+ // The product of a bigit with the factor is of size kBigitSize + 32.
+ // Assert that this number + 1 (for the carry) fits into double chunk.
+ ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
+ DoubleChunk carry = 0;
+ for (int i = 0; i < used_digits_; ++i) {
+ DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
+ bigits_[i] = static_cast<Chunk>(product & kBigitMask);
+ carry = (product >> kBigitSize);
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_digits_ + 1);
+ bigits_[used_digits_] = carry & kBigitMask;
+ used_digits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByUInt64(uint64_t factor) {
+ if (factor == 1) return;
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ ASSERT(kBigitSize < 32);
+ uint64_t carry = 0;
+ uint64_t low = factor & 0xFFFFFFFF;
+ uint64_t high = factor >> 32;
+ for (int i = 0; i < used_digits_; ++i) {
+ uint64_t product_low = low * bigits_[i];
+ uint64_t product_high = high * bigits_[i];
+ uint64_t tmp = (carry & kBigitMask) + product_low;
+ bigits_[i] = tmp & kBigitMask;
+ carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
+ (product_high << (32 - kBigitSize));
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_digits_ + 1);
+ bigits_[used_digits_] = carry & kBigitMask;
+ used_digits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByPowerOfTen(int exponent) {
+ const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765c793, fa10079d);
+ const uint16_t kFive1 = 5;
+ const uint16_t kFive2 = kFive1 * 5;
+ const uint16_t kFive3 = kFive2 * 5;
+ const uint16_t kFive4 = kFive3 * 5;
+ const uint16_t kFive5 = kFive4 * 5;
+ const uint16_t kFive6 = kFive5 * 5;
+ const uint32_t kFive7 = kFive6 * 5;
+ const uint32_t kFive8 = kFive7 * 5;
+ const uint32_t kFive9 = kFive8 * 5;
+ const uint32_t kFive10 = kFive9 * 5;
+ const uint32_t kFive11 = kFive10 * 5;
+ const uint32_t kFive12 = kFive11 * 5;
+ const uint32_t kFive13 = kFive12 * 5;
+ const uint32_t kFive1_to_12[] =
+ { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
+ kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
+
+ ASSERT(exponent >= 0);
+ if (exponent == 0) return;
+ if (used_digits_ == 0) return;
+
+ // We shift by exponent at the end just before returning.
+ int remaining_exponent = exponent;
+ while (remaining_exponent >= 27) {
+ MultiplyByUInt64(kFive27);
+ remaining_exponent -= 27;
+ }
+ while (remaining_exponent >= 13) {
+ MultiplyByUInt32(kFive13);
+ remaining_exponent -= 13;
+ }
+ if (remaining_exponent > 0) {
+ MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
+ }
+ ShiftLeft(exponent);
+}
+
+
+void Bignum::Square() {
+ ASSERT(IsClamped());
+ int product_length = 2 * used_digits_;
+ EnsureCapacity(product_length);
+
+ // Comba multiplication: compute each column separately.
+ // Example: r = a2a1a0 * b2b1b0.
+ // r = 1 * a0b0 +
+ // 10 * (a1b0 + a0b1) +
+ // 100 * (a2b0 + a1b1 + a0b2) +
+ // 1000 * (a2b1 + a1b2) +
+ // 10000 * a2b2
+ //
+ // In the worst case we have to accumulate used_digits products of digit*digit.
+ //
+ // Assert that the additional number of bits in a DoubleChunk are enough to
+ // sum up used_digits of Bigit*Bigit.
+ if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
+ UNIMPLEMENTED();
+ }
+ DoubleChunk accumulator = 0;
+ // First shift the digits so we don't overwrite them.
+ int copy_offset = used_digits_;
+ for (int i = 0; i < used_digits_; ++i) {
+ bigits_[copy_offset + i] = bigits_[i];
+ }
+ // We have two loops to avoid some 'if's in the loop.
+ for (int i = 0; i < used_digits_; ++i) {
+ // Process temporary digit i with power i.
+ // The sum of the two indices must be equal to i.
+ int bigit_index1 = i;
+ int bigit_index2 = 0;
+ // Sum all of the sub-products.
+ while (bigit_index1 >= 0) {
+ Chunk chunk1 = bigits_[copy_offset + bigit_index1];
+ Chunk chunk2 = bigits_[copy_offset + bigit_index2];
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ for (int i = used_digits_; i < product_length; ++i) {
+ int bigit_index1 = used_digits_ - 1;
+ int bigit_index2 = i - bigit_index1;
+ // Invariant: sum of both indices is again equal to i.
+ // Inner loop runs 0 times on last iteration, emptying accumulator.
+ while (bigit_index2 < used_digits_) {
+ Chunk chunk1 = bigits_[copy_offset + bigit_index1];
+ Chunk chunk2 = bigits_[copy_offset + bigit_index2];
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ // The overwritten bigits_[i] will never be read in further loop iterations,
+ // because bigit_index1 and bigit_index2 are always greater
+ // than i - used_digits_.
+ bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ // Since the result was guaranteed to lie inside the number the
+ // accumulator must be 0 now.
+ ASSERT(accumulator == 0);
+
+ // Don't forget to update the used_digits and the exponent.
+ used_digits_ = product_length;
+ exponent_ *= 2;
+ Clamp();
+}
+
+
+void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
+ ASSERT(base != 0);
+ ASSERT(power_exponent >= 0);
+ if (power_exponent == 0) {
+ AssignUInt16(1);
+ return;
+ }
+ Zero();
+ int shifts = 0;
+ // We expect base to be in range 2-32, and most often to be 10.
+ // It does not make much sense to implement different algorithms for counting
+ // the bits.
+ while ((base & 1) == 0) {
+ base >>= 1;
+ shifts++;
+ }
+ int bit_size = 0;
+ int tmp_base = base;
+ while (tmp_base != 0) {
+ tmp_base >>= 1;
+ bit_size++;
+ }
+ int final_size = bit_size * power_exponent;
+ // 1 extra bigit for the shifting, and one because final_size / kBigitSize
+ // rounds down.
+ EnsureCapacity(final_size / kBigitSize + 2);
+
+ // Left to Right exponentiation.
+ int mask = 1;
+ while (power_exponent >= mask) mask <<= 1;
+
+ // The mask is now pointing to the bit above the most significant 1-bit of
+ // power_exponent.
+ // Get rid of the first 1-bit.
+ mask >>= 2;
+ uint64_t this_value = base;
+
+ bool delayed_multiplication = false;
+ const uint64_t max_32bits = 0xFFFFFFFF;
+ while (mask != 0 && this_value <= max_32bits) {
+ this_value = this_value * this_value;
+ // Verify that there is enough space in this_value to perform the
+ // multiplication. The first bit_size bits must be 0.
+ if ((power_exponent & mask) != 0) {
+ uint64_t base_bits_mask =
+ ~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
+ bool high_bits_zero = (this_value & base_bits_mask) == 0;
+ if (high_bits_zero) {
+ this_value *= base;
+ } else {
+ delayed_multiplication = true;
+ }
+ }
+ mask >>= 1;
+ }
+ AssignUInt64(this_value);
+ if (delayed_multiplication) {
+ MultiplyByUInt32(base);
+ }
+
+ // Now do the same thing as a bignum.
+ while (mask != 0) {
+ Square();
+ if ((power_exponent & mask) != 0) {
+ MultiplyByUInt32(base);
+ }
+ mask >>= 1;
+ }
+
+ // And finally add the saved shifts.
+ ShiftLeft(shifts * power_exponent);
+}
+
+
+// Precondition: this/other fits in 16 bits.
+uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+ ASSERT(other.used_digits_ > 0);
+
+ // Easy case: if we have fewer digits than the divisor, then the result is 0.
+ // Note: this handles the case where this == 0, too.
+ if (BigitLength() < other.BigitLength()) {
+ return 0;
+ }
+
+ Align(other);
+
+ uint16_t result = 0;
+
+ // Start by removing multiples of 'other' until both numbers have the same
+ // number of digits.
+ while (BigitLength() > other.BigitLength()) {
+ // This naive approach is extremely inefficient if this divided by other
+ // might be big. This function is implemented for doubleToString where
+ // the result should be small (less than 10).
+ ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
+ // Remove the multiples of the first digit.
+ // Example: this = 23 and other = 9 -> remove 2 multiples.
+ result += bigits_[used_digits_ - 1];
+ SubtractTimes(other, bigits_[used_digits_ - 1]);
+ }
+
+ ASSERT(BigitLength() == other.BigitLength());
+
+ // Both bignums are at the same length now.
+ // Since other has more than 0 digits we know that the access to
+ // bigits_[used_digits_ - 1] is safe.
+ Chunk this_bigit = bigits_[used_digits_ - 1];
+ Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
+
+ if (other.used_digits_ == 1) {
+ // Shortcut for easy (and common) case.
+ int quotient = this_bigit / other_bigit;
+ bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
+ result += quotient;
+ Clamp();
+ return result;
+ }
+
+ int division_estimate = this_bigit / (other_bigit + 1);
+ result += division_estimate;
+ SubtractTimes(other, division_estimate);
+
+ if (other_bigit * (division_estimate + 1) > this_bigit) {
+ // No need to even try to subtract. Even if other's remaining digits were 0
+ // another subtraction would be too much.
+ return result;
+ }
+
+ while (LessEqual(other, *this)) {
+ SubtractBignum(other);
+ result++;
+ }
+ return result;
+}
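
A worked instance of the quotient/remainder contract documented in bignum.h, again using the internal API for illustration only:

    Bignum num, den;
    num.AssignUInt16(17);
    den.AssignUInt16(5);
    uint16_t q = num.DivideModuloIntBignum(den);  // q == 3
    // num now holds the remainder, 2.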
+
+
+template<typename S>
+static int SizeInHexChars(S number) {
+ ASSERT(number > 0);
+ int result = 0;
+ while (number != 0) {
+ number >>= 4;
+ result++;
+ }
+ return result;
+}
+
+
+static char HexCharOfValue(int value) {
+ ASSERT(0 <= value && value < 16);
+ if (value < 10) return value + '0';
+ return value - 10 + 'A';
+}
+
+
+bool Bignum::ToHexString(char* buffer, int buffer_size) const {
+ ASSERT(IsClamped());
+ // Each bigit must be printable as a separate hex character.
+ ASSERT(kBigitSize % 4 == 0);
+ const int kHexCharsPerBigit = kBigitSize / 4;
+
+ if (used_digits_ == 0) {
+ if (buffer_size < 2) return false;
+ buffer[0] = '0';
+ buffer[1] = '\0';
+ return true;
+ }
+ // We add 1 for the terminating '\0' character.
+ int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
+ SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
+ if (needed_chars > buffer_size) return false;
+ int string_index = needed_chars - 1;
+ buffer[string_index--] = '\0';
+ for (int i = 0; i < exponent_; ++i) {
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = '0';
+ }
+ }
+ for (int i = 0; i < used_digits_ - 1; ++i) {
+ Chunk current_bigit = bigits_[i];
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
+ current_bigit >>= 4;
+ }
+ }
+ // And finally the last bigit.
+ Chunk most_significant_bigit = bigits_[used_digits_ - 1];
+ while (most_significant_bigit != 0) {
+ buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
+ most_significant_bigit >>= 4;
+ }
+ return true;
+}
+
+
+Bignum::Chunk Bignum::BigitAt(int index) const {
+ if (index >= BigitLength()) return 0;
+ if (index < exponent_) return 0;
+ return bigits_[index - exponent_];
+}
+
+
+int Bignum::Compare(const Bignum& a, const Bignum& b) {
+ ASSERT(a.IsClamped());
+ ASSERT(b.IsClamped());
+ int bigit_length_a = a.BigitLength();
+ int bigit_length_b = b.BigitLength();
+ if (bigit_length_a < bigit_length_b) return -1;
+ if (bigit_length_a > bigit_length_b) return +1;
+ for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
+ Chunk bigit_a = a.BigitAt(i);
+ Chunk bigit_b = b.BigitAt(i);
+ if (bigit_a < bigit_b) return -1;
+ if (bigit_a > bigit_b) return +1;
+ // Otherwise they are equal up to this digit. Try the next digit.
+ }
+ return 0;
+}
+
+
+int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
+ ASSERT(a.IsClamped());
+ ASSERT(b.IsClamped());
+ ASSERT(c.IsClamped());
+ if (a.BigitLength() < b.BigitLength()) {
+ return PlusCompare(b, a, c);
+ }
+ if (a.BigitLength() + 1 < c.BigitLength()) return -1;
+ if (a.BigitLength() > c.BigitLength()) return +1;
+ // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
+ // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
+ // of 'a'.
+ if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
+ return -1;
+ }
+
+ Chunk borrow = 0;
+ // Starting at min_exponent all digits are == 0. So no need to compare them.
+ int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
+ for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
+ Chunk chunk_a = a.BigitAt(i);
+ Chunk chunk_b = b.BigitAt(i);
+ Chunk chunk_c = c.BigitAt(i);
+ Chunk sum = chunk_a + chunk_b;
+ if (sum > chunk_c + borrow) {
+ return +1;
+ } else {
+ borrow = chunk_c + borrow - sum;
+ if (borrow > 1) return -1;
+ borrow <<= kBigitSize;
+ }
+ }
+ if (borrow == 0) return 0;
+ return -1;
+}
+
+
+void Bignum::Clamp() {
+ while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
+ used_digits_--;
+ }
+ if (used_digits_ == 0) {
+ // Zero.
+ exponent_ = 0;
+ }
+}
+
+
+bool Bignum::IsClamped() const {
+ return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
+}
+
+
+void Bignum::Zero() {
+ for (int i = 0; i < used_digits_; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ = 0;
+ exponent_ = 0;
+}
+
+
+void Bignum::Align(const Bignum& other) {
+ if (exponent_ > other.exponent_) {
+ // If "X" represents a "hidden" digit (by the exponent) then we are in the
+ // following case (a == this, b == other):
+ // a: aaaaaaXXXX or a: aaaaaXXX
+ // b: bbbbbbX b: bbbbbbbbXX
+ // We replace some of the hidden digits (X) of a with 0 digits.
+ // a: aaaaaa000X or a: aaaaa0XX
+ int zero_digits = exponent_ - other.exponent_;
+ EnsureCapacity(used_digits_ + zero_digits);
+ for (int i = used_digits_ - 1; i >= 0; --i) {
+ bigits_[i + zero_digits] = bigits_[i];
+ }
+ for (int i = 0; i < zero_digits; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ += zero_digits;
+ exponent_ -= zero_digits;
+ ASSERT(used_digits_ >= 0);
+ ASSERT(exponent_ >= 0);
+ }
+}
+
+
+void Bignum::BigitsShiftLeft(int shift_amount) {
+ ASSERT(shift_amount < kBigitSize);
+ ASSERT(shift_amount >= 0);
+ Chunk carry = 0;
+ for (int i = 0; i < used_digits_; ++i) {
+ Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
+ bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
+ carry = new_carry;
+ }
+ if (carry != 0) {
+ bigits_[used_digits_] = carry;
+ used_digits_++;
+ }
+}
+
+
+void Bignum::SubtractTimes(const Bignum& other, int factor) {
+ ASSERT(exponent_ <= other.exponent_);
+ if (factor < 3) {
+ for (int i = 0; i < factor; ++i) {
+ SubtractBignum(other);
+ }
+ return;
+ }
+ Chunk borrow = 0;
+ int exponent_diff = other.exponent_ - exponent_;
+ for (int i = 0; i < other.used_digits_; ++i) {
+ DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
+ DoubleChunk remove = borrow + product;
+ Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
+ bigits_[i + exponent_diff] = difference & kBigitMask;
+ borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
+ (remove >> kBigitSize));
+ }
+ for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
+ if (borrow == 0) return;
+ Chunk difference = bigits_[i] - borrow;
+ bigits_[i] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ Clamp();
+}
+
+
+} } // namespace v8::internal
diff --git a/src/bignum.h b/src/bignum.h
new file mode 100644
index 0000000..1d2bff6
--- /dev/null
+++ b/src/bignum.h
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_BIGNUM_H_
+#define V8_BIGNUM_H_
+
+namespace v8 {
+namespace internal {
+
+class Bignum {
+ public:
+ // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
+ // This bignum can encode much bigger numbers, since it contains an
+ // exponent.
+ static const int kMaxSignificantBits = 3584;
+
+ Bignum();
+ void AssignUInt16(uint16_t value);
+ void AssignUInt64(uint64_t value);
+ void AssignBignum(const Bignum& other);
+
+ void AssignDecimalString(Vector<const char> value);
+ void AssignHexString(Vector<const char> value);
+
+ void AssignPowerUInt16(uint16_t base, int exponent);
+
+ void AddUInt16(uint16_t operand);
+ void AddUInt64(uint64_t operand);
+ void AddBignum(const Bignum& other);
+ // Precondition: this >= other.
+ void SubtractBignum(const Bignum& other);
+
+ void Square();
+ void ShiftLeft(int shift_amount);
+ void MultiplyByUInt32(uint32_t factor);
+ void MultiplyByUInt64(uint64_t factor);
+ void MultiplyByPowerOfTen(int exponent);
+ void Times10() { return MultiplyByUInt32(10); }
+ // Pseudocode:
+ // int result = this / other;
+ // this = this % other;
+ // In the worst case this function is in O(this/other).
+ uint16_t DivideModuloIntBignum(const Bignum& other);
+
+ bool ToHexString(char* buffer, int buffer_size) const;
+
+ static int Compare(const Bignum& a, const Bignum& b);
+ static bool Equal(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) == 0;
+ }
+ static bool LessEqual(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) <= 0;
+ }
+ static bool Less(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) < 0;
+ }
+ // Returns Compare(a + b, c);
+ static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
+ // Returns a + b == c
+ static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) == 0;
+ }
+ // Returns a + b <= c
+ static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) <= 0;
+ }
+ // Returns a + b < c
+ static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) < 0;
+ }
+ private:
+ typedef uint32_t Chunk;
+ typedef uint64_t DoubleChunk;
+
+ static const int kChunkSize = sizeof(Chunk) * 8;
+ static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
+ // With bigit size of 28 we lose some bits, but a double still fits easily
+ // into two chunks, and more importantly we can use the Comba multiplication.
+ static const int kBigitSize = 28;
+ static const Chunk kBigitMask = (1 << kBigitSize) - 1;
+ // Every instance allocates kBigitCapacity chunks on the stack. Bignums cannot
+ // grow. There are no checks if the stack-allocated space is sufficient.
+ static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
+
+ void EnsureCapacity(int size) {
+ if (size > kBigitCapacity) {
+ UNREACHABLE();
+ }
+ }
+ void Align(const Bignum& other);
+ void Clamp();
+ bool IsClamped() const;
+ void Zero();
+ // Requires this to have enough capacity (no tests done).
+ // Updates used_digits_ if necessary.
+ // by must be < kBigitSize.
+ void BigitsShiftLeft(int shift_amount);
+ // BigitLength includes the "hidden" digits encoded in the exponent.
+ int BigitLength() const { return used_digits_ + exponent_; }
+ Chunk BigitAt(int index) const;
+ void SubtractTimes(const Bignum& other, int factor);
+
+ Chunk bigits_buffer_[kBigitCapacity];
+ // A vector backed by bigits_buffer_. This way accesses to the array are
+ // checked for out-of-bounds errors.
+ Vector<Chunk> bigits_;
+ int used_digits_;
+ // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
+ int exponent_;
+
+ DISALLOW_COPY_AND_ASSIGN(Bignum);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_BIGNUM_H_
diff --git a/src/code-stubs.h b/src/code-stubs.h
index c0a8d30..ec64353 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -542,7 +542,7 @@
ApiFunction* fun() { return fun_; }
Major MajorKey() { return NoCache; }
int MinorKey() { return 0; }
- const char* GetName() { return "ApiEntryStub"; }
+ const char* GetName() { return "ApiGetterEntryStub"; }
// The accessor info associated with the function.
Handle<AccessorInfo> info_;
// The function to be called.
@@ -550,6 +550,32 @@
};
+class ApiCallEntryStub : public CodeStub {
+ public:
+ ApiCallEntryStub(Handle<CallHandlerInfo> info,
+ ApiFunction* fun)
+ : info_(info),
+ fun_(fun) { }
+ void Generate(MacroAssembler* masm);
+ virtual bool has_custom_cache() { return true; }
+ virtual bool GetCustomCache(Code** code_out);
+ virtual void SetCustomCache(Code* value);
+
+ static const int kStackSpace = 0;
+ static const int kArgc = 5;
+ private:
+ Handle<CallHandlerInfo> info() { return info_; }
+ ApiFunction* fun() { return fun_; }
+ Major MajorKey() { return NoCache; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "ApiCallEntryStub"; }
+ // The call handler info associated with the function.
+ Handle<CallHandlerInfo> info_;
+ // The function to be called.
+ ApiFunction* fun_;
+};
+
+
class JSEntryStub : public CodeStub {
public:
JSEntryStub() { }
diff --git a/src/codegen.cc b/src/codegen.cc
index 2e32418..e954dd6 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -482,8 +482,8 @@
}
-bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
- Object* cache = info()->load_stub_cache();
+// Implementation of CodeStub::GetCustomCache.
+static bool GetCustomCacheHelper(Object* cache, Code** code_out) {
if (cache->IsUndefined()) {
return false;
} else {
@@ -493,9 +493,24 @@
}
+bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
+ return GetCustomCacheHelper(info()->load_stub_cache(), code_out);
+}
+
+
void ApiGetterEntryStub::SetCustomCache(Code* value) {
info()->set_load_stub_cache(value);
}
+bool ApiCallEntryStub::GetCustomCache(Code** code_out) {
+ return GetCustomCacheHelper(info()->call_stub_cache(), code_out);
+}
+
+
+void ApiCallEntryStub::SetCustomCache(Code* value) {
+ info()->set_call_stub_cache(value);
+}
+
+
} } // namespace v8::internal
diff --git a/src/double.h b/src/double.h
index e805173..65eded9 100644
--- a/src/double.h
+++ b/src/double.h
@@ -54,18 +54,20 @@
explicit Double(DiyFp diy_fp)
: d64_(DiyFpToUint64(diy_fp)) {}
+ // The value encoded by this Double must be greater than or equal to +0.0.
+ // It must not be special (infinity, or NaN).
DiyFp AsDiyFp() const {
+ ASSERT(Sign() > 0);
ASSERT(!IsSpecial());
return DiyFp(Significand(), Exponent());
}
- // this->Significand() must not be 0.
+ // The value encoded by this Double must be strictly greater than 0.
DiyFp AsNormalizedDiyFp() const {
+ ASSERT(value() > 0.0);
uint64_t f = Significand();
int e = Exponent();
- ASSERT(f != 0);
-
// The current double could be a denormal.
while ((f & kHiddenBit) == 0) {
f <<= 1;
@@ -82,6 +84,20 @@
return d64_;
}
+ // Returns the next greater double. Returns +infinity on input +infinity.
+ double NextDouble() const {
+ if (d64_ == kInfinity) return Double(kInfinity).value();
+ if (Sign() < 0 && Significand() == 0) {
+ // -0.0
+ return 0.0;
+ }
+ if (Sign() < 0) {
+ return Double(d64_ - 1).value();
+ } else {
+ return Double(d64_ + 1).value();
+ }
+ }
+
int Exponent() const {
if (IsDenormal()) return kDenormalExponent;
@@ -120,24 +136,30 @@
((d64 & kSignificandMask) != 0);
}
-
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) == 0);
}
-
int Sign() const {
uint64_t d64 = AsUint64();
return (d64 & kSignMask) == 0? 1: -1;
}
+ // Precondition: the value encoded by this Double must be greater than or
+ // equal to +0.0.
+ DiyFp UpperBoundary() const {
+ ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
// Returns the two boundaries of this.
// The bigger boundary (m_plus) is normalized. The lower boundary has the same
// exponent as m_plus.
+ // Precondition: the value encoded by this Double must be greater than 0.
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ ASSERT(value() > 0.0);
DiyFp v = this->AsDiyFp();
bool significand_is_zero = (v.f() == kHiddenBit);
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 54501ec..46feea7 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -186,6 +186,7 @@
// heap.cc
DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
+DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)")
DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
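
Like the other heap-size flags, this one feeds straight into ConfigureHeapDefault() (see heap.cc below), so the limit can also be exercised from a shell command line, e.g. --max_executable_size=64 for a 64 MB cap.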
diff --git a/src/heap.cc b/src/heap.cc
index 226a202..134f40e 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -83,16 +83,19 @@
intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
intptr_t Heap::code_range_size_ = 0;
+intptr_t Heap::max_executable_size_ = max_old_generation_size_;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
intptr_t Heap::code_range_size_ = 512*MB;
+intptr_t Heap::max_executable_size_ = 256*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
intptr_t Heap::code_range_size_ = 0;
+intptr_t Heap::max_executable_size_ = 128*MB;
#endif
// The snapshot semispace size will be the default semispace size if
@@ -172,6 +175,12 @@
lo_space_->Size();
}
+intptr_t Heap::CommittedMemoryExecutable() {
+ if (!HasBeenSetup()) return 0;
+
+ return MemoryAllocator::SizeExecutable();
+}
+
intptr_t Heap::Available() {
if (!HasBeenSetup()) return 0;
@@ -4313,7 +4322,9 @@
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
+bool Heap::ConfigureHeap(int max_semispace_size,
+ int max_old_gen_size,
+ int max_executable_size) {
if (HasBeenSetup()) return false;
if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
@@ -4334,6 +4345,15 @@
}
if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
+ if (max_executable_size > 0) {
+ max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
+ }
+
+ // The max executable size must be less than or equal to the max old
+ // generation size.
+ if (max_executable_size_ > max_old_generation_size_) {
+ max_executable_size_ = max_old_generation_size_;
+ }
// The new space size must be a power of two to support single-bit testing
// for containment.
@@ -4351,8 +4371,9 @@
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(
- FLAG_max_new_space_size * (KB / 2), FLAG_max_old_space_size * MB);
+ return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
+ FLAG_max_old_space_size * MB,
+ FLAG_max_executable_size * MB);
}
@@ -4435,7 +4456,7 @@
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
// are contiguous and aligned to their size.
- if (!MemoryAllocator::Setup(MaxReserved())) return false;
+ if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
void* chunk =
MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
if (chunk == NULL) return false;
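
A short illustration of the clamping above, calling the internal API before Setup(): a requested executable limit larger than the old-generation limit is silently reduced to it.

    // 64 MB executable > 32 MB old generation, so the executable limit
    // is clamped to 32 MB.
    Heap::ConfigureHeap(0, 32 * MB, 64 * MB);
    ASSERT(Heap::MaxExecutableSize() == 32 * MB);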
diff --git a/src/heap.h b/src/heap.h
index 714bf0d..c37ced3 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -222,7 +222,9 @@
public:
// Configure heap size before setup. Return false if the heap has been
// setup already.
- static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
+ static bool ConfigureHeap(int max_semispace_size,
+ int max_old_gen_size,
+ int max_executable_size);
static bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
@@ -253,6 +255,7 @@
static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
static int InitialSemiSpaceSize() { return initial_semispace_size_; }
static intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ static intptr_t MaxExecutableSize() { return max_executable_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -261,6 +264,9 @@
// Returns the amount of memory currently committed for the heap.
static intptr_t CommittedMemory();
+ // Returns the amount of executable memory currently committed for the heap.
+ static intptr_t CommittedMemoryExecutable();
+
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@@ -1096,6 +1102,7 @@
static int max_semispace_size_;
static int initial_semispace_size_;
static intptr_t max_old_generation_size_;
+ static intptr_t max_executable_size_;
static intptr_t code_range_size_;
// For keeping track of how much data has survived
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 624be0c..79637a1 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -521,7 +521,6 @@
void push(const Immediate& x);
void push(Register src);
void push(const Operand& src);
- void push(Label* label, RelocInfo::Mode relocation_mode);
void pop(Register dst);
void pop(const Operand& dst);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index b2b7392..a7d658b 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -3067,6 +3067,26 @@
}
+void ApiCallEntryStub::Generate(MacroAssembler* masm) {
+ __ PrepareCallApiFunction(kStackSpace, kArgc);
+ STATIC_ASSERT(kArgc == 5);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
+ __ mov(ApiParameterOperand(2), ebx); // v8::Arguments::values_.
+ __ mov(ApiParameterOperand(3), edx); // v8::Arguments::length_.
+ // v8::Arguments::is_construct_call_.
+ __ mov(ApiParameterOperand(4), Immediate(0));
+
+ // v8::InvocationCallback's argument.
+ __ lea(eax, ApiParameterOperand(1));
+ __ mov(ApiParameterOperand(0), eax);
+
+ __ CallApiFunctionAndReturn(fun(), kArgc);
+}
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 7295340..6f4ef87 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -8578,9 +8578,11 @@
}
right.Unuse();
frame_->Push(&left);
- if (!node->to_int32()) {
- // If ToInt32 is called on the result of ADD, SUB, or MUL, we don't
+ if (!node->to_int32() || op == Token::MUL) {
+ // If ToInt32 is called on the result of ADD or SUB, we don't
// care about overflows.
+ // The result of MUL may not be representable precisely as a double,
+ // so we have to check for overflow.
unsafe_bailout_->Branch(overflow);
}
break;
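
For example, 0x7FFFFFFF * 0x7FFFFFFF = 0x3FFFFFFF00000001 needs 62 significant bits, but a double keeps only 53, so the rounded product loses its low bits; ToInt32 of that double differs from the wrapped 32-bit product, and the only safe move is to bail out on overflow.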
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 7b9b843..d65eebb 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -488,7 +488,7 @@
// stored in ApiParameterOperand(0), ApiParameterOperand(1) etc.
void PrepareCallApiFunction(int stack_space, int argc);
- // Tail call an API function (jump). Allocates HandleScope, extracts
+ // Calls an API function. Allocates HandleScope, extracts
// returned value from handle and propagates exceptions.
// Clobbers ebx, esi, edi and caller-save registers.
void CallApiFunctionAndReturn(ApiFunction* function, int argc);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 042335a..f59928f 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -413,6 +413,10 @@
}
+// Number of pointers to be reserved on stack for fast API call.
+static const int kFastApiCallArguments = 3;
+
+
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -423,10 +427,9 @@
// -- esp[4] : last argument in the internal frame of the caller
// -----------------------------------
__ pop(scratch);
- __ push(Immediate(Smi::FromInt(0)));
- __ push(Immediate(Smi::FromInt(0)));
- __ push(Immediate(Smi::FromInt(0)));
- __ push(Immediate(Smi::FromInt(0)));
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ push(Immediate(Smi::FromInt(0)));
+ }
__ push(scratch);
}
@@ -434,75 +437,81 @@
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : last fast api call extra argument
+ // -- esp[0] : return address.
+ // -- esp[4] : last fast api call extra argument.
// -- ...
- // -- esp[16] : first fast api call extra argument
- // -- esp[20] : last argument in the internal frame
+ // -- esp[kFastApiCallArguments * 4] : first fast api call extra argument.
+ // -- esp[kFastApiCallArguments * 4 + 4] : last argument in the internal
+ // frame.
// -----------------------------------
__ pop(scratch);
- __ add(Operand(esp), Immediate(kPointerSize * 4));
+ __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
__ push(scratch);
}
// Generates call to FastHandleApiCall builtin.
-static void GenerateFastApiCall(MacroAssembler* masm,
+static bool GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ Failure** failure) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : object passing the type check
// (last fast api call extra argument,
// set by CheckPrototypes)
- // -- esp[8] : api call data
- // -- esp[12] : api callback
- // -- esp[16] : api function
+ // -- esp[8] : api function
// (first fast api call extra argument)
- // -- esp[20] : last argument
+ // -- esp[12] : api call data
+ // -- esp[16] : last argument
// -- ...
- // -- esp[(argc + 5) * 4] : first argument
- // -- esp[(argc + 6) * 4] : receiver
+ // -- esp[(argc + 3) * 4] : first argument
+ // -- esp[(argc + 4) * 4] : receiver
// -----------------------------------
-
// Get the function and setup the context.
JSFunction* function = optimization.constant_function();
__ mov(edi, Immediate(Handle<JSFunction>(function)));
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- __ mov(Operand(esp, 4 * kPointerSize), edi);
- bool info_loaded = false;
- Object* callback = optimization.api_call_info()->callback();
- if (Heap::InNewSpace(callback)) {
- info_loaded = true;
- __ mov(ecx, Handle<CallHandlerInfo>(optimization.api_call_info()));
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kCallbackOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ Object* call_data = optimization.api_call_info()->data();
+ Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+ if (Heap::InNewSpace(call_data)) {
+ __ mov(ecx, api_call_info_handle);
+ __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
__ mov(Operand(esp, 3 * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(Handle<Object>(callback)));
- }
- Object* call_data = optimization.api_call_info()->data();
- if (Heap::InNewSpace(call_data)) {
- if (!info_loaded) {
- __ mov(ecx, Handle<CallHandlerInfo>(optimization.api_call_info()));
- }
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 2 * kPointerSize), ebx);
- } else {
- __ mov(Operand(esp, 2 * kPointerSize),
+ __ mov(Operand(esp, 3 * kPointerSize),
Immediate(Handle<Object>(call_data)));
}
- // Set the number of arguments.
- __ mov(eax, Immediate(argc + 4));
+ // Prepare arguments for ApiCallEntryStub.
+ __ lea(eax, Operand(esp, 3 * kPointerSize));
+ __ lea(ebx, Operand(esp, (argc + 3) * kPointerSize));
+ __ Set(edx, Immediate(argc));
- // Jump to the fast api call builtin (tail call).
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::FastHandleApiCall));
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ Object* callback = optimization.api_call_info()->callback();
+ Address api_function_address = v8::ToCData<Address>(callback);
+ ApiFunction fun(api_function_address);
+
+ ApiCallEntryStub stub(api_call_info_handle, &fun);
+
+ __ EnterInternalFrame();
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) {
+ *failure = Failure::cast(result);
+ return false;
+ }
+
+ __ LeaveInternalFrame();
+ __ ret((argc + 4) * kPointerSize);
+ return true;
}
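For concreteness, with argc == 2 the layout above puts argc + 4 = 6 words
above the return address: the type-check slot at esp[4], the api function at
esp[8], the call data at esp[12], the two arguments at esp[16] and esp[20],
and the receiver at esp[24]. The closing ret((argc + 4) * kPointerSize) pops
exactly those 24 bytes, leaving the caller's stack balanced.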
@@ -515,7 +524,7 @@
arguments_(arguments),
name_(name) {}
- void Compile(MacroAssembler* masm,
+ bool Compile(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
@@ -524,7 +533,8 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Label* miss) {
+ Label* miss,
+ Failure** failure) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -535,17 +545,18 @@
CallOptimization optimization(lookup);
if (optimization.is_constant_call()) {
- CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ return CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss,
+ failure);
} else {
CompileRegular(masm,
object,
@@ -556,11 +567,12 @@
name,
holder,
miss);
+ return true;
}
}
private:
- void CompileCacheable(MacroAssembler* masm,
+ bool CompileCacheable(MacroAssembler* masm,
JSObject* object,
Register receiver,
Register scratch1,
@@ -570,7 +582,8 @@
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
- Label* miss_label) {
+ Label* miss_label,
+ Failure** failure) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -632,7 +645,11 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ bool success = GenerateFastApiCall(masm, optimization,
+ arguments_.immediate(), failure);
+ if (!success) {
+ return false;
+ }
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -650,6 +667,8 @@
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
}
+
+ return true;
}
void CompileRegular(MacroAssembler* masm,
@@ -1046,8 +1065,7 @@
__ EnterInternalFrame();
// Push the stack address where the list of arguments ends.
- __ mov(scratch2, esp);
- __ sub(Operand(scratch2), Immediate(2 * kPointerSize));
+ __ lea(scratch2, Operand(esp, -2 * kPointerSize));
__ push(scratch2);
__ push(receiver); // receiver
__ push(reg); // holder
@@ -1061,12 +1079,11 @@
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
// This will be passed as the const AccessorInfo& to the C++ callback.
- __ mov(eax, esp);
- __ add(Operand(eax), Immediate(4 * kPointerSize));
+ STATIC_ASSERT(ApiGetterEntryStub::kStackSpace == 5);
+ __ lea(eax, Operand(esp, 4 * kPointerSize));
__ mov(ebx, esp);
// Do call through the api.
- ASSERT_EQ(5, ApiGetterEntryStub::kStackSpace);
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun);
@@ -2208,7 +2225,11 @@
}
if (depth != kInvalidProtoDepth) {
- GenerateFastApiCall(masm(), optimization, argc);
+ Failure* failure;
+ bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
+ if (!success) {
+ return failure;
+ }
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@@ -2253,16 +2274,21 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), ecx);
- compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- edx,
- ebx,
- edi,
- eax,
- &miss);
+ Failure* failure;
+ bool success = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ edx,
+ ebx,
+ edi,
+ eax,
+ &miss,
+ &failure);
+ if (!success) {
+ return failure;
+ }
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
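The bool/Failure** plumbing above follows a single pattern: a code-generating
helper that may allocate (e.g. to materialize stub code) returns false and
hands the Failure object to its caller through an out parameter, and the
outermost compiler entry point converts it back into its normal failure
return value. A minimal sketch of the idiom, using only calls that appear in
this patch (HelperStub is a hypothetical stand-in for any stub):

    static bool GenerateHelper(MacroAssembler* masm, Failure** failure) {
      HelperStub stub;
      // TryCallStub must not trigger a GC; if the stub code cannot be
      // allocated it yields a Failure instead of generated code.
      MaybeObject* result = masm->TryCallStub(&stub);
      if (result->IsFailure()) {
        *failure = Failure::cast(result);
        return false;
      }
      return true;
    }

    // At the top level the failure simply becomes the compile result:
    //   Failure* failure;
    //   if (!GenerateHelper(masm(), &failure)) return failure;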
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index c0e5610..2b79016 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -997,6 +997,8 @@
data()->ShortPrint();
PrintF("\n - flag: ");
flag()->ShortPrint();
+ PrintF("\n - load_stub_cache: ");
+ load_stub_cache()->ShortPrint();
}
void AccessCheckInfo::AccessCheckInfoVerify() {
@@ -1046,6 +1048,7 @@
CHECK(IsCallHandlerInfo());
VerifyPointer(callback());
VerifyPointer(data());
+ VerifyPointer(call_stub_cache());
}
void CallHandlerInfo::CallHandlerInfoPrint() {
@@ -1054,6 +1057,8 @@
callback()->ShortPrint();
PrintF("\n - data: ");
data()->ShortPrint();
+ PrintF("\n - call_stub_cache: ");
+ call_stub_cache()->ShortPrint();
}
void TemplateInfo::TemplateInfoVerify() {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 399ef35..79d70e1 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2557,6 +2557,7 @@
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
+ACCESSORS(CallHandlerInfo, call_stub_cache, Object, kCallStubCacheOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
diff --git a/src/objects.h b/src/objects.h
index 6029ad5..9d975ec 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -5423,6 +5423,7 @@
public:
DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
+ DECL_ACCESSORS(call_stub_cache, Object)
static inline CallHandlerInfo* cast(Object* obj);
@@ -5433,7 +5434,8 @@
static const int kCallbackOffset = HeapObject::kHeaderSize;
static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+ static const int kCallStubCacheOffset = kDataOffset + kPointerSize;
+ static const int kSize = kCallStubCacheOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
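For reference, on ia32 (kPointerSize == 4, and assuming HeapObject::kHeaderSize
is a single map word) the resulting layout is:

    kCallbackOffset      == 4    // HeapObject::kHeaderSize
    kDataOffset          == 8    // kCallbackOffset + kPointerSize
    kCallStubCacheOffset == 12   // kDataOffset + kPointerSize
    kSize                == 16   // kCallStubCacheOffset + kPointerSize

i.e. the new field grows every CallHandlerInfo instance by one word.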
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index a4d9a82..29f9ab4 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1848,22 +1848,6 @@
}
-int HeapSnapshotGenerator::GetGlobalSecurityToken() {
- return collection_->token_enumerator()->GetTokenId(
- Top::context()->global()->global_context()->security_token());
-}
-
-
-int HeapSnapshotGenerator::GetObjectSecurityToken(HeapObject* obj) {
- if (obj->IsGlobalContext()) {
- return collection_->token_enumerator()->GetTokenId(
- Context::cast(obj)->security_token());
- } else {
- return TokenEnumerator::kNoSecurityToken;
- }
-}
-
-
class IndexedReferencesExtractor : public ObjectVisitor {
public:
IndexedReferencesExtractor(HeapSnapshotGenerator* generator,
@@ -1893,19 +1877,11 @@
void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
// We need to reference JS global objects from snapshot's root.
- // We also need to only include global objects from the current
- // security context. And we don't want to add the global proxy,
- // as we don't have a special type for it.
+ // We use JSGlobalProxy because this is what the embedder (e.g. a browser)
+ // uses for the global object.
if (obj->IsJSGlobalProxy()) {
- int global_security_token = GetGlobalSecurityToken();
JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
- int object_security_token =
- collection_->token_enumerator()->GetTokenId(
- Context::cast(proxy->context())->security_token());
- if (object_security_token == TokenEnumerator::kNoSecurityToken
- || object_security_token == global_security_token) {
- SetRootReference(proxy->map()->prototype());
- }
+ SetRootReference(proxy->map()->prototype());
return;
}
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 6f63f6a..b691a05 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -948,8 +948,6 @@
private:
HeapEntry* GetEntry(Object* obj);
- int GetGlobalSecurityToken();
- int GetObjectSecurityToken(HeapObject* obj);
void ExtractReferences(HeapObject* obj);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
diff --git a/src/spaces.cc b/src/spaces.cc
index e3fb923..2f3e41a 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -270,8 +270,9 @@
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-intptr_t MemoryAllocator::capacity_ = 0;
-intptr_t MemoryAllocator::size_ = 0;
+intptr_t MemoryAllocator::capacity_ = 0;
+intptr_t MemoryAllocator::capacity_executable_ = 0;
+intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;
List<MemoryAllocator::MemoryAllocationCallbackRegistration>
@@ -302,8 +303,10 @@
}
-bool MemoryAllocator::Setup(intptr_t capacity) {
+bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
capacity_ = RoundUp(capacity, Page::kPageSize);
+ capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
+ ASSERT_GE(capacity_, capacity_executable_);
// Over-estimate the size of chunks_ array. It assumes the expansion of old
// space is always in the unit of a chunk (kChunkSize) except the last
@@ -346,6 +349,7 @@
ASSERT(top_ == max_nof_chunks_); // all chunks are free
top_ = 0;
capacity_ = 0;
+ capacity_executable_ = 0;
size_ = 0;
max_nof_chunks_ = 0;
}
@@ -357,16 +361,31 @@
if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
return NULL;
}
+
void* mem;
- if (executable == EXECUTABLE && CodeRange::exists()) {
- mem = CodeRange::AllocateRawMemory(requested, allocated);
+ if (executable == EXECUTABLE) {
+ // Check executable memory limit.
+ if (size_executable_ + requested >
+ static_cast<size_t>(capacity_executable_)) {
+ LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
+ "V8 Executable Allocation capacity exceeded"));
+ return NULL;
+ }
+ // Allocate executable memory either from code range or from the
+ // OS.
+ if (CodeRange::exists()) {
+ mem = CodeRange::AllocateRawMemory(requested, allocated);
+ } else {
+ mem = OS::Allocate(requested, allocated, true);
+ }
+ // Update executable memory size.
+ size_executable_ += static_cast<int>(*allocated);
} else {
- mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+ mem = OS::Allocate(requested, allocated, false);
}
int alloced = static_cast<int>(*allocated);
size_ += alloced;
- if (executable == EXECUTABLE) size_executable_ += alloced;
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
@@ -391,6 +410,7 @@
if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
ASSERT(size_ >= 0);
+ ASSERT(size_executable_ >= 0);
}
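A worked example of the new check: with capacity_executable_ set to 16 MB and
size_executable_ already at 15 MB, a 2 MB EXECUTABLE request now fails the
limit test and returns NULL before any CodeRange or OS allocation is
attempted; a NOT_EXECUTABLE request of the same size is unaffected and only
has to fit under the overall capacity_.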
diff --git a/src/spaces.h b/src/spaces.h
index 3ed2fe8..0c10d2c 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -491,8 +491,8 @@
class MemoryAllocator : public AllStatic {
public:
// Initializes its internal bookkeeping structures.
- // Max capacity of the total space.
- static bool Setup(intptr_t max_capacity);
+ // Max capacity of the total space and executable memory limit.
+ static bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
// Deletes valid chunks.
static void TearDown();
@@ -590,6 +590,12 @@
// Returns allocated spaces in bytes.
static intptr_t Size() { return size_; }
+ // Returns the executable bytes still available under the limit.
+ static intptr_t AvailableExecutable() {
+ if (capacity_executable_ < size_executable_) return 0;
+ return capacity_executable_ - size_executable_;
+ }
+
// Returns allocated executable spaces in bytes.
static intptr_t SizeExecutable() { return size_executable_; }
@@ -653,6 +659,8 @@
private:
// Maximum space size in bytes.
static intptr_t capacity_;
+ // Maximum subset of capacity_ that can be executable.
+ static intptr_t capacity_executable_;
// Allocated space size in bytes.
static intptr_t size_;
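From the embedder's side the limit is configured through ResourceConstraints
before the VM starts. A minimal sketch, assuming the include/v8.h half of this
change exposes set_max_executable_size alongside the existing space-size
setters (values in bytes, passed through to Heap::ConfigureHeap as shown in
api.cc):

    #include <v8.h>

    static void ConfigureHeap() {
      v8::ResourceConstraints constraints;
      constraints.set_max_old_space_size(64 * 1024 * 1024);   // 64 MB of heap.
      constraints.set_max_executable_size(16 * 1024 * 1024);  // 16 MB of code.
      // Must run before V8 is initialized; a zero value leaves the
      // corresponding limit at its default.
      v8::SetResourceConstraints(&constraints);
    }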
diff --git a/src/strtod.cc b/src/strtod.cc
index 0ed1b0d..0523d88 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -31,6 +31,7 @@
#include "v8.h"
#include "strtod.h"
+#include "bignum.h"
#include "cached-powers.h"
#include "double.h"
@@ -83,44 +84,12 @@
// 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
10000000000000000000000.0
};
-
static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
-
-extern "C" double gay_strtod(const char* s00, const char** se);
-
-static double old_strtod(Vector<const char> buffer, int exponent) {
- // gay_strtod is broken on Linux,x86. For numbers with few decimal digits
- // the computation is done using floating-point operations which (on Linux)
- // are prone to double-rounding errors.
- // By adding several zeroes to the buffer gay_strtod falls back to a slower
- // (but correct) algorithm.
- const int kInsertedZeroesCount = 20;
- char gay_buffer[1024];
- Vector<char> gay_buffer_vector(gay_buffer, sizeof(gay_buffer));
- int pos = 0;
- for (int i = 0; i < buffer.length(); ++i) {
- gay_buffer_vector[pos++] = buffer[i];
- }
- for (int i = 0; i < kInsertedZeroesCount; ++i) {
- gay_buffer_vector[pos++] = '0';
- }
- exponent -= kInsertedZeroesCount;
- gay_buffer_vector[pos++] = 'e';
- if (exponent < 0) {
- gay_buffer_vector[pos++] = '-';
- exponent = -exponent;
- }
- const int kNumberOfExponentDigits = 5;
- for (int i = kNumberOfExponentDigits - 1; i >= 0; i--) {
- gay_buffer_vector[pos + i] = exponent % 10 + '0';
- exponent /= 10;
- }
- pos += kNumberOfExponentDigits;
- gay_buffer_vector[pos] = '\0';
- return gay_strtod(gay_buffer, NULL);
-}
-
+// Maximum number of significant digits in the decimal representation.
+// In fact the value is 772 (see conversions.cc), but to give us some margin
+// we round up to 780.
+static const int kMaxSignificantDecimalDigits = 780;
static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
for (int i = 0; i < buffer.length(); i++) {
@@ -142,6 +111,23 @@
}
+static void TrimToMaxSignificantDigits(Vector<const char> buffer,
+ int exponent,
+ char* significant_buffer,
+ int* significant_exponent) {
+ for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
+ significant_buffer[i] = buffer[i];
+ }
+ // The input buffer has been trimmed. Therefore the last digit must be
+ // different from '0'.
+ ASSERT(buffer[buffer.length() - 1] != '0');
+ // Set the last digit to be non-zero. This is sufficient to guarantee
+ // correct rounding.
+ significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
+ *significant_exponent =
+ exponent + (buffer.length() - kMaxSignificantDecimalDigits);
+}
+
// Reads digits from the buffer and converts them to a uint64.
// Reads in as many digits as fit into a uint64.
// When the string starts with "1844674407370955161" no further digit is read.
@@ -374,20 +360,81 @@
}
+// Returns the correct double for the buffer*10^exponent.
+// The variable guess should be a close guess that is either the correct double
+// or its lower neighbor (the nearest double less than the correct one).
+// Preconditions:
+// buffer.length() + exponent <= kMaxDecimalPower + 1
+// buffer.length() + exponent > kMinDecimalPower
+// buffer.length() <= kMaxSignificantDecimalDigits
+static double BignumStrtod(Vector<const char> buffer,
+ int exponent,
+ double guess) {
+ if (guess == V8_INFINITY) {
+ return guess;
+ }
+
+ DiyFp upper_boundary = Double(guess).UpperBoundary();
+
+ ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
+ ASSERT(buffer.length() + exponent > kMinDecimalPower);
+ ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
+ // Make sure that the Bignum will be able to hold all our numbers.
+ // Our Bignum implementation has a separate field for exponents. Shifts will
+ // consume at most one bigit (< 64 bits).
+ // ln(10) == 3.3219...
+ ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
+ Bignum input;
+ Bignum boundary;
+ input.AssignDecimalString(buffer);
+ boundary.AssignUInt64(upper_boundary.f());
+ if (exponent >= 0) {
+ input.MultiplyByPowerOfTen(exponent);
+ } else {
+ boundary.MultiplyByPowerOfTen(-exponent);
+ }
+ if (upper_boundary.e() > 0) {
+ boundary.ShiftLeft(upper_boundary.e());
+ } else {
+ input.ShiftLeft(-upper_boundary.e());
+ }
+ int comparison = Bignum::Compare(input, boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return Double(guess).NextDouble();
+ } else if ((Double(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return Double(guess).NextDouble();
+ }
+}
+
+
double Strtod(Vector<const char> buffer, int exponent) {
Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
exponent += left_trimmed.length() - trimmed.length();
if (trimmed.length() == 0) return 0.0;
+ // significant_buffer must outlive this block: trimmed may be re-pointed
+ // at it and is read until the end of the function.
+ char significant_buffer[kMaxSignificantDecimalDigits];
+ if (trimmed.length() > kMaxSignificantDecimalDigits) {
+ int significant_exponent;
+ TrimToMaxSignificantDigits(trimmed, exponent,
+ significant_buffer, &significant_exponent);
+ trimmed =
+ Vector<const char>(significant_buffer, kMaxSignificantDecimalDigits);
+ exponent = significant_exponent;
+ }
if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
- double result;
- if (DoubleStrtod(trimmed, exponent, &result) ||
- DiyFpStrtod(trimmed, exponent, &result)) {
- return result;
+ double guess;
+ if (DoubleStrtod(trimmed, exponent, &guess) ||
+ DiyFpStrtod(trimmed, exponent, &guess)) {
+ return guess;
}
- return old_strtod(trimmed, exponent);
+ return BignumStrtod(trimmed, exponent, guess);
}
} } // namespace v8::internal
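A worked example of the new slow path: Strtod interprets its input as
buffer * 10^exponent, so Strtod("12345", 10) denotes 12345e10 == 1.2345e14.
When neither DoubleStrtod nor DiyFpStrtod can decide, guess is known to be
either the correct double or its lower neighbor, so BignumStrtod only has to
compare the exact input against the boundary halfway between guess and the
next double up:

    input <  boundary  ->  guess is already correct.
    input >  boundary  ->  the next double up is correct.
    input == boundary  ->  exact tie; round to the even significand.

Both sides of the comparison are exact integers (each scaled by the same
powers of ten and two), so no rounding error can creep in.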
diff --git a/src/version.cc b/src/version.cc
index 5ef2a65..b45510c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 5
-#define BUILD_NUMBER 5
+#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false