Version 3.15.4
Fixed Array.prototype.join evaluation order. (issue 2263)
Perform CPU sampling by the CPU sampling thread only if the processing thread is not running. (issue 2364)
When using an Object as a set in Object.getOwnPropertyNames, null out the proto. (issue 2410)
Disabled EXTRA_CHECKS in Release build.
Heap explorer: Show representation of strings.
Removed 'type' and 'arguments' properties from Error object. (issue 2397)
Added atomics implementation for ThreadSanitizer v2. (Chromium issue 128314)
Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@12994 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index c2f245c..855ddb2 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -95,47 +95,6 @@
}
-static MaybeObject* ArraySetLengthObserved(Isolate* isolate,
- Handle<JSArray> array,
- Handle<Object> new_length_handle) {
- List<Handle<String> > indices;
- List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(array->length(), isolate);
- uint32_t old_length = 0;
- CHECK(old_length_handle->ToArrayIndex(&old_length));
- uint32_t new_length = 0;
- CHECK(new_length_handle->ToArrayIndex(&new_length));
- // TODO(adamk): This loop can be very slow for arrays in dictionary mode.
- // Find another way to iterate over arrays with dictionary elements.
- for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- PropertyAttributes attributes = array->GetLocalElementAttribute(i);
- if (attributes == ABSENT) continue;
- // A non-configurable property will cause the truncation operation to
- // stop at this index.
- if (attributes == DONT_DELETE) break;
- // TODO(adamk): Don't fetch the old value if it's an accessor.
- old_values.Add(Object::GetElement(array, i));
- indices.Add(isolate->factory()->Uint32ToString(i));
- }
-
- MaybeObject* result = array->SetElementsLength(*new_length_handle);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
-
- CHECK(array->length()->ToArrayIndex(&new_length));
- if (old_length != new_length) {
- for (int i = 0; i < indices.length(); ++i) {
- JSObject::EnqueueChangeRecord(
- array, "deleted", indices[i], old_values[i]);
- }
- JSObject::EnqueueChangeRecord(
- array, "updated", isolate->factory()->length_symbol(),
- old_length_handle);
- }
- return *hresult;
-}
-
-
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Isolate* isolate = object->GetIsolate();
@@ -163,11 +122,7 @@
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- if (FLAG_harmony_observation && array_handle->map()->is_observed()) {
- return ArraySetLengthObserved(isolate, array_handle, uint32_v);
- } else {
- return array_handle->SetElementsLength(*uint32_v);
- }
+ return array_handle->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
diff --git a/src/api.cc b/src/api.cc
index a6619f1..3c44960 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -905,7 +905,7 @@
}
// Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result);
+ i::Handle<i::Object> handle(result, isolate_);
return handle.location();
}
@@ -3414,7 +3414,7 @@
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol), isolate);
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -4649,13 +4649,14 @@
v8::Local<v8::Object> Context::Global() {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::Global()")) {
return Local<v8::Object>();
}
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> global(context->global_proxy());
+ i::Handle<i::Object> global(context->global_proxy(), isolate);
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index b679efa..2fe80b0 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -2390,6 +2390,20 @@
}
+void Assembler::vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-892.
+ // cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) |
+ // Vm(3-0)
+ unsigned x = (cond | 0x1C*B23 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ emit(x);
+}
+
+
void Assembler::vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
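
As a sanity check on the encoding comment above, the bit layout can be evaluated in isolation. A minimal standalone sketch, assuming V8's Bn constants mean 1u << n and that the AL condition encodes as 0xE in bits 31-28 (both hold in this codebase), reproduces the machine word for vmla.f64 d0, d1, d2:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Assumed constants: Bn == 1u << n, al == 0xE << 28.
    const uint32_t B8 = 1u << 8, B9 = 1u << 9, B12 = 1u << 12,
                   B16 = 1u << 16, B23 = 1u << 23;
    const uint32_t al = 0xEu << 28;
    const uint32_t dst = 0, src1 = 1, src2 = 2;  // d0, d1, d2
    uint32_t x = al | 0x1C * B23 | src1 * B16 | dst * B12 | 0x5 * B9 | B8 | src2;
    printf("0x%08X\n", x);  // prints 0xEE010B02, the vmla.f64 d0, d1, d2 word
    return 0;
  }
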
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 8418aee..bc12480 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1126,6 +1126,10 @@
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
+ void vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 1e73a55..2c2589c 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -41,8 +41,7 @@
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -627,24 +626,6 @@
}
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -748,13 +729,13 @@
Register int_scratch,
Destination destination,
DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register scratch2,
SwVfpRegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
+ ASSERT(!int_scratch.is(dst_mantissa));
+ ASSERT(!int_scratch.is(dst_exponent));
Label done;
@@ -763,56 +744,57 @@
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst2 | dst1 |
+ // | dst_exponent | dst_mantissa |
// | s | exp | mantissa |
// Check for zero.
__ cmp(int_scratch, Operand::Zero());
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
+ __ mov(dst_exponent, int_scratch);
+ __ mov(dst_mantissa, int_scratch);
__ b(eq, &done);
// Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ CountLeadingZeros(dst1, int_scratch, scratch2);
- __ rsb(dst1, dst1, Operand(31));
+ __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
+ __ rsb(dst_mantissa, dst_mantissa, Operand(31));
// Set the exponent.
- __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst2, scratch2, scratch2,
+ __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst_exponent, scratch2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
+ __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
- __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
// Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
+ SetCC);
__ b(mi, &fewer_than_20_useful_bits);
// Set the higher 20 bits of the mantissa.
- __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
+ __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
__ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst1, Operand(int_scratch, LSL, scratch2));
+ __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
__ b(&done);
__ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
__ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst2, dst2, scratch2);
+ __ orr(dst_exponent, dst_exponent, scratch2);
// Set dst_mantissa to 0.
- __ mov(dst1, Operand::Zero());
+ __ mov(dst_mantissa, Operand::Zero());
}
__ bind(&done);
}
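
The core-register path above builds the IEEE-754 double by hand: the sign goes to the top bit of dst_exponent, the position of the highest set bit plus kExponentBias (1023) fills the exponent field, and the remaining bits are left-aligned into the 52-bit mantissa split across the two words. A portable sketch of the same construction, with illustrative names rather than V8 API:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  static void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
    if (value == 0) { *hi = 0; *lo = 0; return; }  // the stub's early-out
    uint32_t sign = value < 0 ? 0x80000000u : 0u;
    uint32_t u = value < 0 ? -static_cast<uint32_t>(value)
                           : static_cast<uint32_t>(value);
    int p = 31;
    while (!(u & (1u << p))) --p;   // position of the first set bit
    uint32_t exponent = static_cast<uint32_t>(p) + 1023;  // kExponentBias
    u &= ~(1u << p);                // drop the implicit leading 1
    uint64_t mantissa = static_cast<uint64_t>(u) << (52 - p);
    *hi = sign | (exponent << 20) | static_cast<uint32_t>(mantissa >> 32);
    *lo = static_cast<uint32_t>(mantissa);
  }

  int main() {
    uint32_t hi, lo;
    IntToDoubleWords(-5, &hi, &lo);
    uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    printf("%f\n", d);  // prints -5.000000
    return 0;
  }
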
@@ -823,8 +805,8 @@
Destination destination,
DwVfpRegister double_dst,
DwVfpRegister double_scratch,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -840,8 +822,8 @@
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+ dst_exponent, scratch2, single_scratch);
__ b(&done);
__ bind(&obj_is_not_smi);
@@ -868,26 +850,52 @@
__ b(ne, not_int32);
if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // Load the double value in the destination registers.
+ bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+ if (save_registers) {
+ // Save both output registers, because the other one probably holds
+ // an important value too.
+ __ Push(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
// Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
+ Label zero;
+ __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst_mantissa));
__ cmp(scratch1, Operand::Zero());
- __ b(eq, &done);
+ __ b(eq, &zero);
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+ Label restore_input_and_miss;
+ DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+ &restore_input_and_miss);
- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // dst_* were trashed. Reload the double value.
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ b(&done);
+
+ __ bind(&restore_input_and_miss);
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ b(not_int32);
+
+ __ bind(&zero);
+ if (save_registers) {
+ __ Drop(2);
+ }
}
__ bind(&done);
@@ -910,14 +918,15 @@
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done;
+ Label done, maybe_undefined;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
@@ -964,20 +973,28 @@
__ tst(scratch1, Operand(HeapNumber::kSignMask));
__ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
}
+ __ b(&done);
+
+ __ bind(&maybe_undefined);
+ __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ __ b(ne, not_int32);
+ // |undefined| is truncated to 0.
+ __ mov(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
+ Register src_exponent,
+ Register src_mantissa,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ubfx(scratch,
- src1,
+ src_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -997,11 +1014,11 @@
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
+ __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
__ cmp(tmp, Operand(30));
__ b(gt, not_int32);
// - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
+ __ tst(src_mantissa, Operand(0x3fffff));
__ b(ne, not_int32);
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1012,19 +1029,19 @@
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
- src2,
+ src_mantissa,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
__ orr(dst,
dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
// Create the mask and test the lower bits (of the higher bits).
__ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
+ __ mov(src_mantissa, Operand(1));
+ __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
+ __ sub(src_exponent, src_exponent, Operand(1));
+ __ tst(dst, src_exponent);
__ b(ne, not_int32);
}
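
Stated portably, the property DoubleIs32BitInteger establishes is simply "this double is exactly some 32-bit integer". A minimal C++ restatement of that predicate (the stub instead tests the raw exponent/mantissa words so it can stay in integer registers, and leaves the 0/-0 distinction to its callers):

  #include <cmath>

  static bool FitsInt32(double d) {
    // The range test also rejects NaN: every comparison with NaN is false.
    if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
    return d == std::floor(d);  // no fractional bits
  }
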
@@ -1148,48 +1165,43 @@
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1204,47 +1216,45 @@
}
__ Ret();
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
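
The NaN test that survives this restructuring relies on the IEEE-754 layout the comments spell out: NaN means all eleven exponent bits (62..52) set and a non-zero mantissa (51..0). For reference, the same predicate in plain C++:

  #include <cstdint>
  #include <cstring>

  static bool IsNaNBits(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    uint64_t exponent = (bits >> 52) & 0x7FF;              // bits 62..52
    uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);  // bits 51..0
    return exponent == 0x7FF && mantissa != 0;
  }
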
@@ -1678,42 +1688,60 @@
}
-// On entry lhs_ and rhs_ are the values to be compared.
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+// On entry r1 and r0 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ __ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
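
The now-unconditional smi fast path above leans on V8's pointer tagging: on 32-bit targets a smi is a 31-bit integer shifted left by one with a zero tag bit, so a single OR tests both operands at once and the comparison result is just the difference of the untagged values. A hypothetical C-level rendition, not V8 API:

  #include <cstdint>

  static bool BothSmi(intptr_t lhs, intptr_t rhs) {
    return ((lhs | rhs) & 1) == 0;  // tag bit clear on both operands
  }

  static intptr_t SmiCompare(intptr_t lhs, intptr_t rhs) {
    // Matches the stub: the result is negative, zero, or positive.
    return (lhs >> 1) - (rhs >> 1);
  }
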
@@ -1724,7 +1752,7 @@
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
@@ -1747,7 +1775,7 @@
// If one of the sides was a NaN then the v flag is set. Load r0 with
// whatever it takes to make the comparison fail, since comparisons with NaN
// always fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
@@ -1756,19 +1784,19 @@
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
+ EmitNanCheck(masm, &lhs_not_nan, cc);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
Label check_for_symbols;
@@ -1778,8 +1806,8 @@
// that case. If the inputs are not doubles then jumps to check_for_symbols.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
@@ -1787,31 +1815,31 @@
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
- if (cc_ == eq && !strict_) {
+ if (cc == eq && !strict()) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4,
@@ -1821,18 +1849,18 @@
__ bind(&slow);
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ASSERT(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1842,6 +1870,9 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -2325,20 +2356,23 @@
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(r1, r0);
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
+ __ push(r2);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
@@ -2349,59 +2383,8 @@
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2411,7 +2394,7 @@
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ add(right, left, Operand(right), SetCC); // Add optimistically.
__ Ret(vc);
@@ -2526,10 +2509,24 @@
}
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2541,11 +2538,17 @@
__ AssertSmi(left);
__ AssertSmi(right);
}
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2555,25 +2558,44 @@
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP2) &&
- op_ != Token::MOD ?
+ op != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
+ // Load right operand to d7 or r2/r3.
+ if (right_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, right, destination, d7, d8, r2, r3, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
+ : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, right, d7, r2, r3, heap_number_map,
+ scratch1, scratch2, fail);
+ }
+ // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, left, destination, d6, d8, r0, r1, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
+ : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, left, d6, r0, r1, heap_number_map,
+ scratch1, scratch2, fail);
+ }
}
// Calculate the result.
@@ -2582,7 +2604,7 @@
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP2);
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ vadd(d5, d6, d7);
break;
@@ -2606,7 +2628,7 @@
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
+ op,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2647,7 +2669,7 @@
}
Label result_not_a_smi;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
break;
@@ -2698,8 +2720,9 @@
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
}
// r2: Answer as signed int32.
@@ -2714,7 +2737,7 @@
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
@@ -2739,12 +2762,14 @@
// Generate the smi code. If the operation on smis is successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
Label not_smis;
Register left = r1;
@@ -2757,12 +2782,14 @@
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
}
__ bind(&not_smis);
}
@@ -2774,14 +2801,14 @@
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2789,23 +2816,14 @@
GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2834,7 +2852,7 @@
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
Register left = r1;
Register right = r0;
@@ -2856,7 +2874,7 @@
Label skip;
__ orr(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2866,6 +2884,15 @@
case Token::MUL:
case Token::DIV:
case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
@@ -2964,12 +2991,13 @@
: BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ mov(r0, heap_number_result);
@@ -2988,12 +3016,13 @@
// Allocate a heap number to store the result.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
// Load the left value from the value saved on the stack.
__ Pop(r1, r0);
@@ -3098,12 +3127,13 @@
__ bind(&return_heap_number);
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -3147,6 +3177,7 @@
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3185,20 +3216,32 @@
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
+ Label call_runtime, call_string_add_or_runtime, transition;
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3206,6 +3249,7 @@
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3241,61 +3285,20 @@
}
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(r0) && !result.is(r1));
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3308,7 +3311,7 @@
__ mov(result, Operand(overwritable_operand));
__ bind(&allocated);
} else {
- ASSERT(mode_ == NO_OVERWRITE);
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
@@ -5147,7 +5150,7 @@
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -5425,48 +5428,6 @@
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5916,7 +5877,7 @@
// Check if the two characters match.
// Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
__ cmp(chars, scratch);
__ b(eq, &found_in_symbol_table);
__ bind(&next_probe[i]);
@@ -6129,8 +6090,8 @@
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ascii string. Allocate the result.
@@ -6144,13 +6105,13 @@
// Locate first character of substring to copy.
__ add(r5, r5, r3);
// Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string
// r1: first character of result string
// r2: result string length
// r5: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
@@ -6275,7 +6236,7 @@
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
@@ -6428,8 +6389,8 @@
&call_runtime);
// Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6448,7 +6409,7 @@
// in a little endian mode)
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6498,11 +6459,6 @@
__ tst(r4, Operand(kAsciiDataHintMask));
__ tst(r5, Operand(kAsciiDataHintMask), ne);
__ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
@@ -6535,10 +6491,10 @@
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r7,
r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &first_prepared);
@@ -6551,10 +6507,10 @@
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r1,
r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &second_prepared);
@@ -6577,7 +6533,7 @@
__ b(eq, &non_ascii_string_add_flat_result);
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
// r7: first character of first string.
// r1: first character of second string.
@@ -6668,7 +6624,7 @@
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -6689,31 +6645,53 @@
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
+ // stub if NaN is involved or VFP2 is unsupported.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(r2, r0); // Can't clobber r0 yet.
+ SwVfpRegister single_scratch = d2.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d1, single_scratch);
- // Compare operands
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(r2, r1); // Can't clobber r1 yet.
+ single_scratch = d3.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d0, single_scratch);
+
+ __ bind(&done);
+ // Compare operands.
__ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
@@ -6727,14 +6705,16 @@
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
@@ -6752,7 +6732,7 @@
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
Label miss;
// Registers containing left and right operands respectively.
@@ -6790,7 +6770,7 @@
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6868,7 +6848,7 @@
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -7564,7 +7544,7 @@
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1,
+ __ StoreNumberToDoubleElements(r0, r3,
// Overwrites all regs after this.
r5, r6, r7, r9, r2,
&slow_elements);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 3e79624..0443cf7 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -142,108 +142,6 @@
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp2_ = CpuFeatures::IsSupported(VFP2);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_vfp2_(VFP2Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_vfp2_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP2Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | VFP2Bits::encode(use_vfp2_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -724,20 +622,6 @@
Register scratch1,
Register scratch2);
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -836,7 +720,12 @@
Register heap_number_result,
Register scratch);
- private:
+ // Loads the object from |object| into floating point registers.
+ // Depending on |destination| the value ends up either in |dst| or
+ // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
+ // must be supported. If kCoreRegisters is requested and VFP3 is
+ // supported, |dst| will still be scratched. If |object| is neither a
+ // smi nor a heap number, |not_number| is jumped to with |object| intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
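The BinaryOpStub removed above packed all of its dispatch state into a 16-bit minor key using BitField templates (the RRRTTTVOOOOOOOMM layout in the deleted comment). The following is a minimal standalone sketch of that packing scheme; the BitField template and the field layout are simplified stand-ins for illustration, not V8's definitions.

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for V8's BitField: a typed field of |size| bits
    // starting at |shift| within a 32-bit key.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        assert((static_cast<uint32_t>(value) >> size) == 0);  // must fit
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // Mirrors the deleted layout comment RRRTTTVOOOOOOOMM: 2 mode bits,
    // 7 op bits, 1 VFP2 bit (the two type-info fields are omitted here).
    typedef BitField<OverwriteMode, 0, 2> ModeBits;
    typedef BitField<int, 2, 7> OpBits;
    typedef BitField<bool, 9, 1> VFP2Bits;

    int main() {
      uint32_t key = ModeBits::encode(OVERWRITE_LEFT) |
                     OpBits::encode(42) |
                     VFP2Bits::encode(true);
      assert(ModeBits::decode(key) == OVERWRITE_LEFT);
      assert(OpBits::decode(key) == 42);
      assert(VFP2Bits::decode(key));
      return 0;
    }

Because every field round-trips through encode/decode, a stub can be reconstructed from its key alone, which is what the second (key-taking) BinaryOpStub constructor relied on.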
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 209e151..52a6295 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -416,7 +416,7 @@
__ b(ne, &external_string);
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index af2ed52..cb0a6cb 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1098,6 +1098,7 @@
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
+// Dd = vmla(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@@ -1160,6 +1161,12 @@
} else {
Unknown(instr); // Not used by V8.
}
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmla.f64'cond 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 03d5067..83b438b 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -130,7 +130,7 @@
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -2377,7 +2377,7 @@
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -3624,7 +3624,7 @@
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
@@ -3653,7 +3653,7 @@
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3691,10 +3691,10 @@
array_length = no_reg;
__ add(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -3710,7 +3710,9 @@
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
@@ -3720,7 +3722,7 @@
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&one_char_separator_loop_entry);
@@ -3740,7 +3742,9 @@
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
@@ -3761,14 +3765,16 @@
__ SmiUntag(string_length);
__ add(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
@@ -4073,7 +4079,8 @@
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
- __ mov(r1, Operand(Smi::FromInt(count_value)));
+ __ mov(r1, r0);
+ __ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
@@ -4298,29 +4305,7 @@
default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cond = eq;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cond = CompareIC::ComputeCondition(op);
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
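The deleted switch duplicated the token-to-condition mapping in every backend; CompareIC::ComputeCondition now centralizes it. A sketch of the mapping it factors out is below; the enum values are stand-ins, not V8's types.

    enum Condition { eq, lt, gt, le, ge };
    enum class CompareToken { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    // Token -> condition-code mapping, formerly repeated per architecture.
    Condition ComputeCondition(CompareToken op) {
      switch (op) {
        case CompareToken::EQ:
        case CompareToken::EQ_STRICT: return eq;
        case CompareToken::LT: return lt;
        case CompareToken::GT: return gt;
        case CompareToken::LTE: return le;
        case CompareToken::GTE: return ge;
      }
      return eq;  // Unreachable for valid compare ops.
    }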
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 4839589..29a3687 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1379,7 +1379,6 @@
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
- receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,
@@ -1699,36 +1698,15 @@
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ return Assembler::IsCmpImmediate(instr);
}
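HasInlinedSmiCode relies on a code-layout convention: when the full code generator inlines a smi check, the instruction immediately following the IC call site is a cmp-with-immediate. A conceptual sketch of that test is below; the mask and pattern constants are placeholders for illustration, not real ARM encodings.

    #include <cstdint>

    const uint32_t kCmpImmMask = 0x0FF00000;     // placeholder field mask
    const uint32_t kCmpImmPattern = 0x03500000;  // placeholder cmp-imm bits

    // True if the word after the call site looks like "cmp rx, #imm",
    // i.e. the caller left patchable inlined smi-check code behind.
    bool HasInlinedSmiCode(const uint32_t* return_address) {
      uint32_t next_instr = *return_address;
      return (next_instr & kCmpImmMask) == kCmpImmPattern;
    }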
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index ff6da03..5cd6914 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1303,8 +1303,21 @@
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
+ if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
+ HAdd* add = HAdd::cast(instr->uses().value());
+ if (instr == add->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded
+ // into a multiply-add.
+ return NULL;
+ }
+ if (instr == add->right() && !add->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add.
+ return NULL;
+ }
+ }
+ return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1330,6 +1343,13 @@
}
}
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
@@ -1344,6 +1364,14 @@
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->left()->IsMul())
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
@@ -1409,7 +1437,7 @@
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -2168,7 +2196,7 @@
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 0ba5e45..0367fd3 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -135,6 +135,7 @@
V(MathMinMax) \
V(ModI) \
V(MulI) \
+ V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@@ -621,6 +622,24 @@
};
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -636,7 +655,7 @@
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -2396,6 +2415,8 @@
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
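LMultiplyAddD above is declared as LTemplateInstruction<1, 3, 0>: the three template parameters are the result, input, and temp counts, and the named accessors index into the fixed inputs_ array. A structural sketch of that pattern, not V8's actual template:

    // Shape-only sketch: fixed-arity operand storage selected by template
    // parameters (R results, I inputs, T temps).
    template <int R, int I, int T>
    struct TemplateInstructionShape {
      void* results_[R == 0 ? 1 : R];  // sketch only: dodge zero-length arrays
      void* inputs_[I == 0 ? 1 : I];
      void* temps_[T == 0 ? 1 : T];
    };

    // <1, 3, 0>: one result, three inputs (addend, multiplier,
    // multiplicand), no temps, matching LMultiplyAddD's accessors.
    struct MultiplyAddDShape : TemplateInstructionShape<1, 3, 0> {
      void* addend() { return inputs_[0]; }
      void* multiplier() { return inputs_[1]; }
      void* multiplicand() { return inputs_[2]; }
    };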
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 7615134..0a28d34 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -230,7 +230,30 @@
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -1297,6 +1320,18 @@
}
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DwVfpRegister addend = ToDoubleRegister(instr->addend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
+
+ __ vmla(addend, multiplier, multiplicand);
+}
+
+
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
const Register left = ToRegister(instr->left());
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index dcc7149..158476e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1786,10 +1786,10 @@
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -1955,13 +1955,13 @@
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -1988,8 +1988,10 @@
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ str(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -2010,7 +2012,8 @@
bind(&smi_value);
add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
@@ -3338,8 +3341,10 @@
Register scratch2,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+ kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch1, first, Operand(kFlatAsciiStringMask));
and_(scratch2, second, Operand(kFlatAsciiStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag));
@@ -3353,8 +3358,10 @@
Register scratch,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+ kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch, type, Operand(kFlatAsciiStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag));
b(ne, failure);
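Both helpers test flat-ASCII-ness as a single mask-and-compare, now with kAsciiDataHintMask folded into the mask; the new ASSERT_EQ guards that the expected tag is still representable under the widened mask. A sketch of the check with made-up bit values:

    #include <cassert>
    #include <cstdint>

    // Illustrative bit values only; V8's instance-type bits differ.
    const uint32_t kIsNotStringMask = 0x80;
    const uint32_t kStringEncodingMask = 0x04;
    const uint32_t kAsciiDataHintMask = 0x10;
    const uint32_t kStringRepresentationMask = 0x03;
    const uint32_t kFlatAsciiStringTag = 0x04;  // sequential + one-byte

    bool IsFlatAsciiInstanceType(uint32_t type) {
      const uint32_t mask = kIsNotStringMask | kStringEncodingMask |
                            kAsciiDataHintMask | kStringRepresentationMask;
      // The tag must survive the mask, or the test below can never pass.
      assert((kFlatAsciiStringTag & mask) == kFlatAsciiStringTag);
      return (type & mask) == kFlatAsciiStringTag;
    }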
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 1d97a6c..ab8418d 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -831,14 +831,14 @@
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index bd7f1bd..efbfff2 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2778,6 +2778,20 @@
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ // vmla
+ if (instr->SzValue() != 0x1) {
+ UNREACHABLE(); // Not used by V8.
+ }
+
+ double dd_value = get_double_from_d_register(vd);
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+
+ // Note: we do the mul and add as two separately rounded steps. A single
+ // fused multiply-add would keep extra intermediate precision and could
+ // differ from ARM's non-fused vmla in the last bit.
+ set_d_register_from_double(vd, dn_value * dm_value);
+ set_d_register_from_double(vd, get_double_from_d_register(vd) + dd_value);
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
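The two-step evaluation matters because VFP vmla is not a fused operation: the product is rounded to double before the add. Computing it with one host fused multiply-add would keep extra intermediate precision. A small host-side demonstration, assuming the compiler does not itself contract a*b + c into an fma (e.g. built with -ffp-contract=off):

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = 1.0 + 1e-16, b = 1.0 - 1e-16, c = -1.0;
      double two_step = a * b + c;       // product rounds to 1.0, then add: 0
      double fused = std::fma(a, b, c);  // single rounding: about -1e-32
      std::printf("two-step: %g  fused: %g\n", two_step, fused);
      return 0;
    }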
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index b039b3d..60a2129 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1623,7 +1623,7 @@
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = r6;
Register end_elements = r5;
@@ -1634,10 +1634,9 @@
__ CheckMap(elements,
r0,
Heap::kFixedArrayMapRootIndex,
- &call_builtin,
+ &check_double,
DONT_DO_SMI_CHECK);
-
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
@@ -1652,7 +1651,6 @@
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
- Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
@@ -1672,6 +1670,40 @@
__ Drop(argc + 1);
__ Ret();
+ __ bind(&check_double);
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedDoubleArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into r0 and calculate new length.
+ __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(r0, r0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(r0, r4);
+ __ b(gt, &call_builtin);
+
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ r4, r0, elements, r3, r5, r2, r9,
+ &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Return the new length, which is already a smi in r0.
+ __ Drop(argc + 1);
+ __ Ret();
+
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1683,6 +1715,11 @@
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
+
+ __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r7, ip);
+ __ b(eq, &call_builtin);
// edx: receiver
// r3: map
Label try_holey_map;
@@ -3819,20 +3856,20 @@
__ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
- Register dst1 = r1;
- Register dst2 = r3;
+ Register dst_mantissa = r1;
+ Register dst_exponent = r3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
d0,
- dst1,
- dst2,
+ dst_mantissa,
+ dst_exponent,
r9,
s0);
- __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
@@ -4101,7 +4138,7 @@
}
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst1, dst2.
+ d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP2);
@@ -4698,7 +4735,6 @@
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
- receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
diff --git a/src/array.js b/src/array.js
index 37053ce..47f796d 100644
--- a/src/array.js
+++ b/src/array.js
@@ -413,6 +413,7 @@
["Array.prototype.join"]);
}
+ var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@@ -422,7 +423,7 @@
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
- return Join(this, TO_UINT32(this.length), separator, ConvertToString);
+ return Join(this, length, separator, ConvertToString);
}
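Hoisting the length read matters because ToString(separator) can run arbitrary user code that mutates the array; per ES5.1 section 15.4.4.5, the length is fetched before the separator is converted. A small JavaScript illustration of the observable difference (print is the d8 shell's output helper):

    // A separator whose toString() shrinks the array mid-join. With the
    // length captured first, join still covers the original three slots.
    var a = [1, 2, 3];
    var sep = {
      toString: function() {
        a.length = 1;  // mutate the array during separator conversion
        return "-";
      }
    };
    // Length (3) is read before ToString(sep), so indices 1 and 2 are now
    // holes and the result is "1--".
    print(a.join(sep));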
diff --git a/src/ast.cc b/src/ast.cc
index 52990b8..9cdae54 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -476,6 +476,7 @@
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
+ if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsSymbol()) {
@@ -604,18 +605,6 @@
}
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->CompareType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
-}
-
-
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
? oracle->GetObjectLiteralStoreMap(this)
diff --git a/src/ast.h b/src/ast.h
index 802ac65..6fb9309 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1777,9 +1777,6 @@
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -1796,8 +1793,7 @@
op_(op),
left_(left),
right_(right),
- pos_(pos),
- compare_type_(NONE) {
+ pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1806,9 +1802,6 @@
Expression* left_;
Expression* right_;
int pos_;
-
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
- CompareTypeFeedback compare_type_;
};
diff --git a/src/atomicops.h b/src/atomicops.h
index 1f0c44a..da33b29 100644
--- a/src/atomicops.h
+++ b/src/atomicops.h
@@ -151,7 +151,9 @@
} } // namespace v8::internal
// Include our platform specific implementation.
-#if defined(_MSC_VER) && \
+#if defined(THREAD_SANITIZER)
+#include "atomicops_internals_tsan.h"
+#elif defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
diff --git a/src/atomicops_internals_tsan.h b/src/atomicops_internals_tsan.h
new file mode 100644
index 0000000..6559336
--- /dev/null
+++ b/src/atomicops_internals_tsan.h
@@ -0,0 +1,335 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
+#define V8_ATOMICOPS_INTERNALS_TSAN_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it. (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of the host x86 CPU. Values may not be correct before main()
+// is run, but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct
+ AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace v8 {
+namespace internal {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+typedef enum {
+ __tsan_memory_order_relaxed = (1 << 0) + 100500,
+ __tsan_memory_order_consume = (1 << 1) + 100500,
+ __tsan_memory_order_acquire = (1 << 2) + 100500,
+ __tsan_memory_order_release = (1 << 3) + 100500,
+ __tsan_memory_order_acq_rel = (1 << 4) + 100500,
+ __tsan_memory_order_seq_cst = (1 << 5) + 100500,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+ __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release);
+ return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release);
+ return cmp;
+}
+
+inline void MemoryBarrier() {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+} // namespace internal
+} // namespace v8
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
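For readers more familiar with C++11 atomics: each wrapper in the new header pins an explicit memory order, which is exactly what the __tsan_* entry points expose to ThreadSanitizer. Below is a rough std::atomic analogue of three of the wrappers; it is an assumption-laden sketch, not V8 code (V8's Atomic32 is a plain integer type, not std::atomic):

    #include <atomic>
    #include <cstdint>

    typedef int32_t Atomic32;

    inline Atomic32 Acquire_Load(const std::atomic<Atomic32>* ptr) {
      return ptr->load(std::memory_order_acquire);
    }

    inline void Release_Store(std::atomic<Atomic32>* ptr, Atomic32 value) {
      ptr->store(value, std::memory_order_release);
    }

    // compare_exchange_strong writes the observed value back into |cmp| on
    // failure, matching the "return what was there" contract of the
    // CompareAndSwap wrappers above.
    inline Atomic32 Acquire_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                           Atomic32 old_value,
                                           Atomic32 new_value) {
      Atomic32 cmp = old_value;
      ptr->compare_exchange_strong(cmp, new_value,
                                   std::memory_order_acquire,
                                   std::memory_order_acquire);
      return cmp;
    }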
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 179e65c..d61c031 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -54,7 +54,7 @@
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
- SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
+ SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));
diff --git a/src/builtins.cc b/src/builtins.cc
index 620e4b3..fb6f8f8 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -325,6 +325,18 @@
}
+static void MoveDoubleElements(FixedDoubleArray* dst,
+ int dst_index,
+ FixedDoubleArray* src,
+ int src_index,
+ int len) {
+ if (len == 0) return;
+ memmove(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len * kDoubleSize);
+}
+
+
static void MoveElements(Heap* heap,
AssertNoAllocation* no_gc,
FixedArray* dst,
@@ -351,24 +363,39 @@
}
-static FixedArray* LeftTrimFixedArray(Heap* heap,
- FixedArray* elms,
- int to_trim) {
+static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
+ for (int i = from; i < to; i++) {
+ dst->set_the_hole(i);
+ }
+}
+
+
+static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
+ FixedArrayBase* elms,
+ int to_trim) {
+ Map* map = elms->map();
+ int entry_size;
+ if (elms->IsFixedArray()) {
+ entry_size = kPointerSize;
+ } else {
+ entry_size = kDoubleSize;
+ }
ASSERT(elms->map() != HEAP->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
ASSERT(!HEAP->lo_space()->Contains(elms));
- STATIC_ASSERT(FixedArray::kMapOffset == 0);
- STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
+ STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
- if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
+ if (to_trim > FixedArrayBase::kHeaderSize / entry_size &&
+ elms->IsFixedArray() &&
!heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
@@ -382,14 +409,15 @@
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
+ heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
- former_start[to_trim] = heap->fixed_array_map();
- former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
+ int new_start_index = to_trim * (entry_size / kPointerSize);
+ former_start[new_start_index] = map;
+ former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
- int size_delta = to_trim * kPointerSize;
+ int size_delta = to_trim * entry_size;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
@@ -397,8 +425,8 @@
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
- return FixedArray::cast(HeapObject::FromAddress(
- elms->address() + to_trim * kPointerSize));
+ return FixedArrayBase::cast(HeapObject::FromAddress(
+ elms->address() + to_trim * entry_size));
}
@@ -427,19 +455,14 @@
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
- if (array->HasFastDoubleElements()) {
- ASSERT(elms == heap->empty_fixed_array());
- MaybeObject* maybe_transition =
- array->TransitionElementsKind(FAST_ELEMENTS);
- if (maybe_transition->IsFailure()) return maybe_transition;
- return elms;
- }
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
- maybe_writable_result->IsFailure()) {
+ !maybe_writable_result->To(&elms)) {
return maybe_writable_result;
}
+ } else if (map == heap->fixed_double_array_map()) {
+ if (args == NULL) return elms;
} else {
return NULL;
}
@@ -449,13 +472,28 @@
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
- MaybeObject* maybe_array = array->EnsureCanContainElements(
- args,
- first_added_arg,
- args_length - first_added_arg,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
- return array->elements();
+ ElementsKind origin_kind = array->map()->elements_kind();
+ ASSERT(!IsFastObjectElementsKind(origin_kind));
+ ElementsKind target_kind = origin_kind;
+ int arg_count = args->length() - first_added_arg;
+ Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
+ for (int i = 0; i < arg_count; i++) {
+ Object* arg = arguments[i];
+ if (arg->IsHeapObject()) {
+ if (arg->IsHeapNumber()) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
+ }
+ }
+ }
+ if (target_kind != origin_kind) {
+ MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ return array->elements();
+ }
+ return elms;
}
@@ -499,75 +537,131 @@
BUILTIN(ArrayPush) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
- if (FLAG_harmony_observation && array->map()->is_observed()) {
+ if (FLAG_harmony_observation &&
+ JSObject::cast(receiver)->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ JSArray* array = JSArray::cast(receiver);
+ ElementsKind kind = array->GetElementsKind();
- int new_length = len + to_add;
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
}
- FixedArray* new_elms = FixedArray::cast(obj);
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
- FillWithHoles(heap, new_elms, new_length, capacity);
+ int new_length = len + to_add;
- elms = new_elms;
+ if (new_length > elms->length()) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ FixedArray* new_elms;
+ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, 0, new_elms, kind, 0, len, elms_obj);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+ FillWithHoles(heap, new_elms, new_length, capacity);
+
+ elms = new_elms;
+ }
+
+ // Add the provided values.
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int index = 0; index < to_add; index++) {
+ elms->set(index + len, args[index + 1], mode);
+ }
+
+ if (elms != array->elements()) {
+ array->set_elements(elms);
+ }
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
+ } else {
+ int len = Smi::cast(array->length())->value();
+ int elms_len = elms_obj->length();
+
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
+
+ int new_length = len + to_add;
+
+ FixedDoubleArray* new_elms;
+
+ if (new_length > elms_len) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ MaybeObject* maybe_obj =
+ heap->AllocateUninitializedFixedDoubleArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, 0, new_elms, kind, 0, len, elms_obj);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
+ FillWithHoles(new_elms, len + to_add, new_elms->length());
+ } else {
+ // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
+ // empty_fixed_array.
+ new_elms = FixedDoubleArray::cast(elms_obj);
+ }
+
+ // Add the provided values.
+ AssertNoAllocation no_gc;
+ int index;
+ for (index = 0; index < to_add; index++) {
+ Object* arg = args[index + 1];
+ new_elms->set(index + len, arg->Number());
+ }
+
+ if (new_elms != array->elements()) {
+ array->set_elements(new_elms);
+ }
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
}
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
-
- if (elms != array->elements()) {
- array->set_elements(elms);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
}
BUILTIN(ArrayPop) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
+ if (!maybe_elms->To(&elms_obj)) return maybe_elms;
+
JSArray* array = JSArray::cast(receiver);
if (FLAG_harmony_observation && array->map()->is_observed()) {
@@ -577,38 +671,36 @@
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
- // Get top element
- Object* top = elms->get(len - 1);
-
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
- if (!top->IsTheHole()) {
- // Delete the top element.
- elms->set_the_hole(len - 1);
- return top;
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = len - 1;
+ MaybeObject* maybe_result;
+ if (accessor->HasElement(array, array, new_length, elms_obj)) {
+ maybe_result = accessor->Get(array, array, new_length, elms_obj);
+ } else {
+ maybe_result = array->GetPrototype()->GetElement(len - 1);
}
-
- return array->GetPrototype()->GetElement(len - 1);
+ if (maybe_result->IsFailure()) return maybe_result;
+ MaybeObject* maybe_failure =
+ accessor->SetLength(array, Smi::FromInt(new_length));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ return maybe_result;
}
BUILTIN(ArrayShift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayShift", args);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayShift", args);
@@ -618,18 +710,28 @@
if (len == 0) return heap->undefined_value();
// Get first element
- Object* first = elms->get(0);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ Object* first;
+ MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
+ if (!maybe_first->To(&first)) return maybe_first;
if (first->IsTheHole()) {
first = heap->undefined_value();
}
- if (!heap->lo_space()->Contains(elms)) {
- array->set_elements(LeftTrimFixedArray(heap, elms, 1));
+ if (!heap->lo_space()->Contains(elms_obj)) {
+ array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
} else {
// Shift the elements.
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
+ if (elms_obj->IsFixedArray()) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ AssertNoAllocation no_gc;
+ MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
+ elms->set(len - 1, heap->the_hole_value());
+ } else {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, 0, elms, 1, len - 1);
+ elms->set_the_hole(len - 1);
+ }
}
// Set the length.
@@ -642,19 +744,21 @@
BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ if (!array->HasFastSmiOrObjectElements()) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ }
+ FixedArray* elms = FixedArray::cast(elms_obj);
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
@@ -675,13 +779,17 @@
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray* new_elms;
+ MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_elms->To(&new_elms)) return maybe_elms;
+
ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, 0, new_elms, kind, to_add, len, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@@ -706,16 +814,20 @@
BUILTIN(ArraySlice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArray* elms;
+ FixedArrayBase* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastSmiOrObjectElements() ||
- !IsJSArrayFastElementMovingAllowed(heap, array)) {
+ if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- elms = FixedArray::cast(array->elements());
+ if (array->HasFastElements()) {
+ elms = array->elements();
+ } else {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
@@ -724,15 +836,19 @@
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject()
- && JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastSmiOrObjectElements();
+ receiver->IsJSObject() &&
+ JSObject::cast(receiver)->map() == arguments_map;
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- elms = FixedArray::cast(JSObject::cast(receiver)->elements());
- Object* len_obj = JSObject::cast(receiver)
- ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ JSObject* object = JSObject::cast(receiver);
+
+ if (object->HasFastElements()) {
+ elms = object->elements();
+ } else {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -740,12 +856,27 @@
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
+ }
+
+ JSObject* object = JSObject::cast(receiver);
+ ElementsKind kind = object->GetElementsKind();
+
+ if (IsHoleyElementsKind(kind)) {
+ bool packed = true;
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = 0; i < len; i++) {
- if (elms->get(i) == heap->the_hole_value()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
+ if (!accessor->HasElement(object, object, i, elms)) {
+ packed = false;
+ break;
}
}
+ if (packed) {
+ kind = GetPackedElementsKind(kind);
+ } else if (!receiver->IsJSArray()) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
}
+
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -758,6 +889,12 @@
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -765,6 +902,12 @@
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
+ } else if (arg2->IsHeapNumber()) {
+ double end = HeapNumber::cast(arg2)->value();
+ if (end < kMinInt || end > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_end = static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
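Note: accepting HeapNumber start/end arguments lets slice handle numeric
indices that arrive as heap numbers rather than Smis, as long as they fit in an
int32; anything outside [kMinInt, kMaxInt] still falls back to the JS builtin.
The clamping below then matches the spec arithmetic: with len == 5 and
relative_start == -2, k = Max(5 + (-2), 0) = 3, final defaults to len == 5,
and the result has two elements.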
@@ -779,21 +922,21 @@
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
- ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
-
// Calculate the length of result array.
int result_len = Max(final - k, 0);
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len);
JSArray* result_array;
+ MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
+ result_len,
+ result_len);
if (!maybe_array->To(&result_array)) return maybe_array;
- CopyObjectToObjectElements(elms, elements_kind, k,
- FixedArray::cast(result_array->elements()),
- elements_kind, 0, result_len);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(object, k, result_array->elements(),
+ kind, 0, result_len, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
return result_array;
}
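Note: routing the copy through ElementsAccessor::CopyElements instead of
CopyObjectToObjectElements is what lets this builtin serve double arrays; the
accessor selects the per-kind copy routine. The ASSERT is sound because source
and result share an elements kind, so the copy never needs to allocate.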
@@ -802,19 +945,18 @@
BUILTIN(ArraySplice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArraySplice", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
+ if (maybe_elms == NULL) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
+ if (!maybe_elms->To(&elms_obj)) return maybe_elms;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
@@ -829,6 +971,12 @@
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
+ relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@@ -858,51 +1006,83 @@
actual_delete_count = Min(Max(value, 0), len - actual_start);
}
+ ElementsKind elements_kind = array->GetElementsKind();
+
+ int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+ int new_length = len - actual_delete_count + item_count;
+
+ // For double-mode arrays we do not support growing the length.
+ if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
+
+ if (new_length == 0) {
+ MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
+ elms_obj, elements_kind, actual_delete_count);
+ if (maybe_array->IsFailure()) return maybe_array;
+ array->set_elements(heap->empty_fixed_array());
+ array->set_length(Smi::FromInt(0));
+ return maybe_array;
+ }
+
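Note: this empty-result fast path donates the old backing store to the result
array (AllocateJSArrayWithElements wraps elms_obj rather than copying it) and
then points the source array at the canonical empty fixed array, so splicing an
array down to nothing performs no element copies at all.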
JSArray* result_array = NULL;
- ElementsKind elements_kind =
- JSObject::cast(receiver)->GetElementsKind();
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
actual_delete_count,
actual_delete_count);
if (!maybe_array->To(&result_array)) return maybe_array;
- {
- // Fill newly created array.
- CopyObjectToObjectElements(elms, elements_kind, actual_start,
- FixedArray::cast(result_array->elements()),
- elements_kind, 0, actual_delete_count);
+ if (actual_delete_count > 0) {
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, actual_start, result_array->elements(),
+ elements_kind, 0, actual_delete_count, elms_obj);
+ // Cannot fail since the source and target arrays are of the same
+ // elements kind.
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
}
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actual_delete_count + item_count;
-
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms) &&
+ const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
- {
+ if (elms_obj->IsFixedDoubleArray()) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, delta, elms, 0, actual_start);
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
}
- elms = LeftTrimFixedArray(heap, elms, delta);
+ elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
elms_changed = true;
} else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
+ if (elms_obj->IsFixedDoubleArray()) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(elms, new_length, len);
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ AssertNoAllocation no_gc;
+ MoveElements(heap, &no_gc,
+ elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(heap, elms, new_length, len);
+ }
}
} else if (item_count > actual_delete_count) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -911,28 +1091,27 @@
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray* new_elms;
+ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
- {
- // Copy the part before actual_start as is.
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0,
- new_elms, kind, 0, actual_start);
- const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(elms, kind,
- actual_start + actual_delete_count,
- new_elms, kind,
- actual_start + item_count, to_copy);
- }
+ // Copy the part before actual_start as is.
+ ElementsKind kind = array->GetElementsKind();
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ array, 0, new_elms, kind, 0, actual_start, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+ const int to_copy = len - actual_delete_count - actual_start;
+ maybe_failure = accessor->CopyElements(
+ array, actual_start + actual_delete_count, new_elms, kind,
+ actual_start + item_count, to_copy, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
FillWithHoles(heap, new_elms, new_length, capacity);
- elms = new_elms;
+ elms_obj = new_elms;
elms_changed = true;
} else {
AssertNoAllocation no_gc;
@@ -943,16 +1122,28 @@
}
}
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ Object* arg = args[3 + k - actual_start];
+ if (arg->IsSmi()) {
+ elms->set(k, Smi::cast(arg)->value());
+ } else {
+ elms->set(k, HeapNumber::cast(arg)->value());
+ }
+ }
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ elms->set(k, args[3 + k - actual_start], mode);
+ }
}
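Note: the two stores differ by representation. The double branch unboxes each
argument in place, via Smi::cast(arg)->value() or
HeapNumber::cast(arg)->value(), and needs no write barrier because the store
holds raw doubles; the FixedArray branch keeps the barrier-aware set() with the
WriteBarrierMode sampled under AssertNoAllocation.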
if (elms_changed) {
- array->set_elements(elms);
+ array->set_elements(elms_obj);
}
-
// Set the length.
array->set_length(Smi::FromInt(new_length));
@@ -977,11 +1168,10 @@
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
+ !JSArray::cast(arg)->HasFastElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
-
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
@@ -991,27 +1181,24 @@
result_len += len;
ASSERT(result_len >= 0);
- if (result_len > FixedArray::kMaxLength) {
+ if (result_len > FixedDoubleArray::kMaxLength) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- if (!JSArray::cast(arg)->HasFastSmiElements()) {
- if (IsFastSmiElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
+ ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ ElementsKind packed_kind = GetPackedElementsKind(arg_kind);
+ if (IsMoreGeneralElementsKindTransition(
+ GetPackedElementsKind(elements_kind), packed_kind)) {
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(arg_kind);
+ } else {
+ elements_kind = arg_kind;
}
}
-
- if (JSArray::cast(arg)->HasFastHoleyElements()) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
}
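Note: the elements-kind computation now walks the transition lattice instead of
special-casing smi arrays. Each argument's kind is reduced to its packed form,
and if that form is more general than the packed form of the running kind, the
running kind widens, keeping holeyness when it was already holey; starting from
FAST_HOLEY_SMI_ELEMENTS and meeting a FAST_DOUBLE_ELEMENTS argument, for
example, yields FAST_HOLEY_DOUBLE_ELEMENTS. The length cap is also now
FixedDoubleArray::kMaxLength, and the copy loop below propagates CopyElements
failures instead of asserting them away, since a cross-kind copy (boxing
doubles, say) can allocate.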
- // Allocate result.
JSArray* result_array;
+ // Allocate result.
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
@@ -1019,19 +1206,19 @@
if (!maybe_array->To(&result_array)) return maybe_array;
if (result_len == 0) return result_array;
- // Copy data.
- int start_pos = 0;
- FixedArray* result_elms(FixedArray::cast(result_array->elements()));
+ int j = 0;
+ FixedArrayBase* storage = result_array->elements();
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
int len = Smi::cast(array->length())->value();
- FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(elms, elements_kind, 0,
- result_elms, elements_kind,
- start_pos, len);
- start_pos += len;
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, 0, storage, elements_kind, j, len);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ j += len;
}
- ASSERT(start_pos == result_len);
+
+ ASSERT(j == result_len);
return result_array;
}
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 7a72059..117b3b1 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -169,6 +169,122 @@
}
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
+ BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
+ if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
+ // The OddballStub handles a number and an oddball, not two oddballs.
+ operands_type = BinaryOpIC::GENERIC;
+ }
+ switch (operands_type) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
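Note: dispatching on Max(left_type_, right_type_) relies on
BinaryOpIC::TypeInfo being ordered from least to most general, so the maximum
of the two operand states is the most general case the stub must handle. A
sketch of the assumed ordering (the enum lives in ic.h, outside this diff):

// Assumed ordering, least to most general:
// UNINITIALIZED < SMI < INT32 < HEAP_NUMBER < ODDBALL < STRING < GENERIC

The one exception is handled explicitly above: two ODDBALL operands are
promoted straight to GENERIC because the oddball stub only covers a number
paired with an oddball.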
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s+%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(left_type_),
+ BinaryOpIC::GetName(right_type_));
+}
+
+
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
+ GenerateBothStringStub(masm);
+ return;
+ }
+ // Try to add the arguments as strings; otherwise, transition to the
+ // generic BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
@@ -196,7 +312,12 @@
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
- ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
+#ifdef DEBUG
+ Token::Value cached_op;
+ ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
+ &cached_op);
+ ASSERT(op_ == cached_op);
+#endif
return true;
}
return false;
@@ -204,7 +325,33 @@
int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+ return OpField::encode(op_ - Token::EQ) |
+ LeftStateField::encode(left_) |
+ RightStateField::encode(right_) |
+ HandlerStateField::encode(state_);
+}
+
+
+void ICCompareStub::DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op) {
+ if (left_state) {
+ *left_state =
+ static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
+ }
+ if (right_state) {
+ *right_state =
+ static_cast<CompareIC::State>(RightStateField::decode(minor_key));
+ }
+ if (handler_state) {
+ *handler_state =
+ static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+ if (op) {
+ *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
+ }
}
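Note: MinorKey and DecodeMinorKey are inverse bit-packing operations over the
fields declared in code-stubs.h below (OpField in bits 0-2, LeftStateField in
3-5, RightStateField in 6-8, HandlerStateField in 9-11). A worked example of
the packing:

// With op_ - Token::EQ == 1, left_ == 2, right_ == 3, handler state_ == 4:
int key = 1 | (2 << 3) | (3 << 6) | (4 << 9);  // == 0x8d1 == 2257
// DecodeMinorKey masks each range back out; passing NULL for an out
// parameter skips that field, as the DEBUG-only cache check above does
// for everything except the op.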
@@ -213,27 +360,28 @@
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
- case CompareIC::SMIS:
+ case CompareIC::SMI:
GenerateSmis(masm);
break;
- case CompareIC::HEAP_NUMBERS:
+ case CompareIC::HEAP_NUMBER:
GenerateHeapNumbers(masm);
break;
- case CompareIC::STRINGS:
+ case CompareIC::STRING:
GenerateStrings(masm);
break;
- case CompareIC::SYMBOLS:
+ case CompareIC::SYMBOL:
GenerateSymbols(masm);
break;
- case CompareIC::OBJECTS:
+ case CompareIC::OBJECT:
GenerateObjects(masm);
break;
case CompareIC::KNOWN_OBJECTS:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
- default:
- UNREACHABLE();
+ case CompareIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
}
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index e4384e7..3110b54 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -482,10 +482,132 @@
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ platform_specific_bit_(false),
+ left_type_(BinaryOpIC::UNINITIALIZED),
+ right_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ Initialize();
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ platform_specific_bit_(PlatformSpecificBits::decode(key)),
+ left_type_(left_type),
+ right_type_(right_type),
+ result_type_(result_type) { }
+
+ static void decode_types_from_minor_key(int minor_key,
+ BinaryOpIC::TypeInfo* left_type,
+ BinaryOpIC::TypeInfo* right_type,
+ BinaryOpIC::TypeInfo* result_type) {
+ *left_type =
+ static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
+ *right_type =
+ static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
+ *result_type =
+ static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
+ }
+
+ static Token::Value decode_op_from_minor_key(int minor_key) {
+ return static_cast<Token::Value>(OpBits::decode(minor_key));
+ }
+
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo left_type_;
+ BinaryOpIC::TypeInfo right_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class PlatformSpecificBits: public BitField<bool, 9, 1> {};
+ class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+ class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | PlatformSpecificBits::encode(platform_specific_bit_)
+ | LeftTypeBits::encode(left_type_)
+ | RightTypeBits::encode(right_type_)
+ | ResultTypeBits::encode(result_type_);
+ }
+
+
+ // Platform-independent implementation.
+ void Generate(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ // Platform-independent signature, platform-specific implementation.
+ void Initialize();
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+
+ // Entirely platform-specific methods are defined as static helper
+ // functions in the <arch>/code-stubs-<arch>.cc files.
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
+
+ friend class CodeGenerator;
+};
+
+
class ICCompareStub: public CodeStub {
public:
- ICCompareStub(Token::Value op, CompareIC::State state)
- : op_(op), state_(state) {
+ ICCompareStub(Token::Value op,
+ CompareIC::State left,
+ CompareIC::State right,
+ CompareIC::State handler)
+ : op_(op),
+ left_(left),
+ right_(right),
+ state_(handler) {
ASSERT(Token::IsCompareOp(op));
}
@@ -493,13 +615,24 @@
void set_known_map(Handle<Map> map) { known_map_ = map; }
+ static void DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op);
+
+ static CompareIC::State CompareState(int minor_key) {
+ return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+
private:
class OpField: public BitField<int, 0, 3> { };
- class StateField: public BitField<int, 3, 5> { };
+ class LeftStateField: public BitField<int, 3, 3> { };
+ class RightStateField: public BitField<int, 6, 3> { };
+ class HandlerStateField: public BitField<int, 9, 3> { };
virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(state_);
- code->set_compare_operation(op_ - Token::EQ);
+ code->set_stub_info(MinorKey());
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
@@ -514,6 +647,7 @@
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
void GenerateKnownObjects(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
@@ -523,108 +657,13 @@
virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
Token::Value op_;
+ CompareIC::State left_;
+ CompareIC::State right_;
CompareIC::State state_;
Handle<Map> known_map_;
};
-// Flags that control the compare stub code generation.
-enum CompareFlags {
- NO_COMPARE_FLAGS = 0,
- NO_SMI_COMPARE_IN_STUB = 1 << 0,
- NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
- CANT_BOTH_BE_NAN = 1 << 2
-};
-
-
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
-};
-
-
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags,
- Register lhs,
- Register rhs) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(lhs),
- rhs_(rhs) { }
-
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(no_reg),
- rhs_(no_reg) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
-
- // Generate the comparison code for two smi operands in the stub.
- bool include_smi_compare_;
-
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
-
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key in 16 bits.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
- class RegisterField: public BitField<bool, 4, 1> {};
- class ConditionField: public BitField<int, 5, 11> {};
-
- Major MajorKey() { return Compare; }
-
- int MinorKey();
-
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
- virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(CompareIC::GENERIC);
- }
-
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
-
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- virtual void PrintName(StringStream* stream);
-};
-
-
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
@@ -1053,6 +1092,9 @@
bool IsEmpty() const { return set_.IsEmpty(); }
bool Contains(Type type) const { return set_.Contains(type); }
+ bool ContainsAnyOf(Types types) const {
+ return set_.ContainsAnyOf(types.set_);
+ }
void Add(Type type) { set_.Add(type); }
byte ToByte() const { return set_.ToIntegral(); }
void Print(StringStream* stream) const;
diff --git a/src/compiler.cc b/src/compiler.cc
index 4250de1..82a3e7c 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -864,7 +864,10 @@
{
CompilationHandleScope handle_scope(*info);
- if (InstallCodeFromOptimizedCodeMap(*info)) return;
+ if (!FLAG_manual_parallel_recompilation &&
+ InstallCodeFromOptimizedCodeMap(*info)) {
+ return;
+ }
if (ParserApi::Parse(*info, kNoParsingFlags)) {
LanguageMode language_mode = info->function()->language_mode();
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index 4982197..1133b20 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -31,7 +31,6 @@
#include "cpu-profiler.h"
#include <new>
-#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
@@ -56,11 +55,18 @@
}
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
+TickSample* ProfilerEventsProcessor::StartTickSampleEvent() {
+ if (!ticks_buffer_is_empty_ || ticks_buffer_is_initialized_) return NULL;
+ ticks_buffer_is_initialized_ = true;
generator_->Tick();
- TickSampleEventRecord* evt =
- new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
- return &evt->sample;
+ ticks_buffer_ = TickSampleEventRecord(enqueue_order_);
+ return &ticks_buffer_.sample;
+}
+
+
+void ProfilerEventsProcessor::FinishTickSampleEvent() {
+ ASSERT(ticks_buffer_is_initialized_ && ticks_buffer_is_empty_);
+ ticks_buffer_is_empty_ = false;
}
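Note: the circular queue is gone; ticks_buffer_ is now a single
TickSampleEventRecord slot guarded by two flags. Start returns NULL while the
slot is still full or already handed out, so a slow processing thread drops
samples instead of racing the sampler. The intended call pattern, per the
header comments (a sketch; the field-filling step is assumed):

TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample != NULL) {
  // ... sampler records pc, sp and the stack frames into *sample ...
  CpuProfiler::FinishTickSampleEvent(isolate);
}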
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 3cbac77..08c82db 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -39,19 +39,19 @@
namespace v8 {
namespace internal {
-static const int kEventsBufferSize = 256 * KB;
-static const int kTickSamplesBufferChunkSize = 64 * KB;
-static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
+ sampler_(sampler),
running_(true),
- ticks_buffer_(sizeof(TickSampleEventRecord),
- kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
+ period_in_useconds_(period_in_useconds),
+ ticks_buffer_is_empty_(true),
+ ticks_buffer_is_initialized_(false),
enqueue_order_(0) {
}
@@ -215,23 +215,17 @@
generator_->RecordTickSample(record.sample);
}
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order == dequeue_order) {
+ if (ticks_buffer_is_empty_) return !ticks_from_vm_buffer_.IsEmpty();
+ if (ticks_buffer_.order == dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
- generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
+ if (ticks_buffer_.sample.frames_count < 0
+ || ticks_buffer_.sample.frames_count > TickSample::kMaxFramesCount) {
+ ticks_buffer_.sample.frames_count = 0;
+ }
+ generator_->RecordTickSample(ticks_buffer_.sample);
+ ticks_buffer_is_empty_ = true;
+ ticks_buffer_is_initialized_ = false;
} else {
return true;
}
@@ -239,22 +233,29 @@
}
+void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time,
+ unsigned* dequeue_order) {
+ while (OS::Ticks() < stop_time) {
+ if (ProcessTicks(*dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
+ }
+}
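Note: Run() now drives sampling itself. Each iteration computes a stop time one
sampling period ahead, triggers sampler_->DoSample() if a sampler is attached,
and then drains code events and ticks until the period elapses, replacing the
old pattern of spinning on the queues and calling YieldCPU while a separate
sampler thread produced ticks.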
+
+
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
+ int64_t stop_time = OS::Ticks() + period_in_useconds_;
+ if (sampler_ != NULL) {
+ sampler_->DoSample();
}
- YieldCPU();
+ ProcessEventsQueue(stop_time, &dequeue_order);
}
- // Process remaining tick events.
- ticks_buffer_.FlushResidualRecords();
- // Perform processing until we have tick events, skip remaining code events.
while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
@@ -310,15 +311,22 @@
}
-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
+TickSample* CpuProfiler::StartTickSampleEvent(Isolate* isolate) {
if (CpuProfiler::is_profiling(isolate)) {
- return isolate->cpu_profiler()->processor_->TickSampleEvent();
+ return isolate->cpu_profiler()->processor_->StartTickSampleEvent();
} else {
return NULL;
}
}
+void CpuProfiler::FinishTickSampleEvent(Isolate* isolate) {
+ if (CpuProfiler::is_profiling(isolate)) {
+ isolate->cpu_profiler()->processor_->FinishTickSampleEvent();
+ }
+}
+
+
void CpuProfiler::DeleteAllProfiles() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->cpu_profiler() != NULL);
@@ -486,13 +494,15 @@
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
+ Sampler* sampler = isolate->logger()->sampler();
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
+ processor_ = new ProfilerEventsProcessor(generator_,
+ sampler,
+ FLAG_cpu_profiler_sampling_period);
NoBarrier_Store(&is_profiling_, true);
- processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@@ -505,12 +515,13 @@
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
+ processor_->Start();
}
}
@@ -545,16 +556,17 @@
void CpuProfiler::StopProcessor() {
+ NoBarrier_Store(&is_profiling_, false);
+ processor_->Stop();
+ processor_->Join();
Logger* logger = Isolate::Current()->logger();
- Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
+ Sampler* sampler = logger->sampler();
sampler->DecreaseProfilingDepth();
+ sampler->SetHasProcessingThread(false);
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
- NoBarrier_Store(&is_profiling_, false);
- processor_->Stop();
- processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 9cd4484..f4bc0c7 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -124,7 +124,9 @@
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ explicit ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -156,11 +158,12 @@
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
- // Tick sample events are filled directly in the buffer of the circular
- // queue (because the structure is of fixed width, but usually not all
- // stack frame entries are filled.) This method returns a pointer to the
- // next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
+ // StartTickSampleEvent returns a pointer only if ticks_buffer_ is empty;
+ // FinishTickSampleEvent marks ticks_buffer_ as filled. Finish should be
+ // called only after a successful Start, i.e. one that returned a non-NULL
+ // pointer.
+ INLINE(TickSample* StartTickSampleEvent());
+ INLINE(void FinishTickSampleEvent());
private:
union CodeEventsContainer {
@@ -173,13 +176,19 @@
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
+ void ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
+ TickSampleEventRecord ticks_buffer_;
+ bool ticks_buffer_is_empty_;
+ bool ticks_buffer_is_initialized_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
@@ -218,7 +227,10 @@
static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
- static TickSample* TickSampleEvent(Isolate* isolate);
+ // Finish should be called only after a successful Start, i.e. one that
+ // returned a non-NULL pointer.
+ static TickSample* StartTickSampleEvent(Isolate* isolate);
+ static void FinishTickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
diff --git a/src/d8.cc b/src/d8.cc
index b3b1bb8..5598bd3 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -67,6 +67,49 @@
namespace v8 {
+
+static Handle<Value> Throw(const char* message) {
+ return ThrowException(String::New(message));
+}
+
+
+// TODO(rossberg): should replace these with proper uses of HasInstance,
+// once we figure out a good way to make the templates global.
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
+const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
+
+
+namespace Symbols {
+#define FOR_EACH_SYMBOL(V) \
+ V(ArrayBuffer, "ArrayBuffer") \
+ V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
+ V(ArrayMarkerPropName, kArrayMarkerPropName) \
+ V(buffer, "buffer") \
+ V(byteLength, "byteLength") \
+ V(byteOffset, "byteOffset") \
+ V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
+ V(length, "length")
+
+#define DEFINE_SYMBOL(name, value) Persistent<String> name;
+FOR_EACH_SYMBOL(DEFINE_SYMBOL)
+#undef DEFINE_SYMBOL
+
+void Initialize() {
+ HandleScope scope;
+#define INIT_SYMBOL(name, value) \
+ name = Persistent<String>::New(String::NewSymbol(value));
+FOR_EACH_SYMBOL(INIT_SYMBOL)
+#undef INIT_SYMBOL
+}
+
+void TearDown() {
+#define DISPOSE_SYMBOL(name, value) name.Dispose();
+ FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
+#undef DISPOSE_SYMBOL
+}
+}
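Note: FOR_EACH_SYMBOL is an X-macro, expanding the same list three ways:
DEFINE_SYMBOL declares the Persistent<String> handles, INIT_SYMBOL interns each
string once in Initialize(), and DISPOSE_SYMBOL releases them in TearDown().
The shell code below can then reuse the cached symbols instead of calling
String::New on every property access.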
+
+
LineEditor *LineEditor::first_ = NULL;
@@ -232,11 +275,11 @@
Handle<Value> Shell::Read(const Arguments& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
return source;
}
@@ -277,14 +320,14 @@
HandleScope handle_scope;
String::Utf8Value file(args[i]);
if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
if (!ExecuteString(source, String::New(*file), false, true)) {
- return ThrowException(String::New("Error executing file"));
+ return Throw("Error executing file");
}
}
return Undefined();
@@ -314,7 +357,7 @@
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
- ThrowException(String::New("Array length must not be negative."));
+ Throw("Array length must not be negative.");
return 0;
}
@@ -323,33 +366,26 @@
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
if (raw_value > static_cast<int32_t>(kMaxLength)) {
- ThrowException(
- String::New("Array length exceeds maximum length."));
+ Throw("Array length exceeds maximum length.");
}
return raw_value;
}
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
- return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
+ return Throw("ArrayBuffer exceeds maximum size (2G)");
}
uint8_t* data = new uint8_t[length];
if (data == NULL) {
- return ThrowException(String::New("Memory allocation failed"));
+ return Throw("Memory allocation failed");
}
memset(data, 0, length);
- buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+ buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName, True());
Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
@@ -357,7 +393,7 @@
buffer->SetIndexedPropertiesToExternalArrayData(
data, v8::kExternalByteArray, length);
- buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
+ buffer->Set(Symbols::byteLength, Int32::New(length), ReadOnly);
return buffer;
}
@@ -373,8 +409,7 @@
}
if (args.Length() == 0) {
- return ThrowException(
- String::New("ArrayBuffer constructor must have one argument"));
+ return Throw("ArrayBuffer constructor must have one argument");
}
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
@@ -400,12 +435,12 @@
array->SetIndexedPropertiesToExternalArrayData(
static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
- array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
- array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
- array->Set(String::New("length"), Int32::New(length), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
- array->Set(String::New("buffer"), buffer, ReadOnly);
+ array->SetHiddenValue(Symbols::ArrayMarkerPropName, Int32::New(type));
+ array->Set(Symbols::byteLength, Int32::New(byteLength), ReadOnly);
+ array->Set(Symbols::byteOffset, Int32::New(byteOffset), ReadOnly);
+ array->Set(Symbols::length, Int32::New(length), ReadOnly);
+ array->Set(Symbols::BYTES_PER_ELEMENT, Int32::New(element_size));
+ array->Set(Symbols::buffer, buffer, ReadOnly);
return array;
}
@@ -439,16 +474,15 @@
int32_t byteOffset;
bool init_from_array = false;
if (args.Length() == 0) {
- return ThrowException(
- String::New("Array constructor must have at least one argument"));
+ return Throw("Array constructor must have at least one argument");
}
if (args[0]->IsObject() &&
!args[0]->ToObject()->GetHiddenValue(
- String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+ Symbols::ArrayBufferMarkerPropName).IsEmpty()) {
// Construct from ArrayBuffer.
buffer = args[0]->ToObject();
int32_t bufferLength =
- convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
+ convertToUint(buffer->Get(Symbols::byteLength), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() < 2 || args[1]->IsUndefined()) {
@@ -457,11 +491,10 @@
byteOffset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (byteOffset > bufferLength) {
- return ThrowException(String::New("byteOffset out of bounds"));
+ return Throw("byteOffset out of bounds");
}
if (byteOffset % element_size != 0) {
- return ThrowException(
- String::New("byteOffset must be multiple of element size"));
+ return Throw("byteOffset must be multiple of element size");
}
}
@@ -469,23 +502,22 @@
byteLength = bufferLength - byteOffset;
length = byteLength / element_size;
if (byteLength % element_size != 0) {
- return ThrowException(
- String::New("buffer size must be multiple of element size"));
+ return Throw("buffer size must be multiple of element size");
}
} else {
length = convertToUint(args[2], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
- return ThrowException(String::New("length out of bounds"));
+ return Throw("length out of bounds");
}
}
} else {
if (args[0]->IsObject() &&
- args[0]->ToObject()->Has(String::New("length"))) {
+ args[0]->ToObject()->Has(Symbols::length)) {
// Construct from array.
length = convertToUint(
- args[0]->ToObject()->Get(String::New("length")), &try_catch);
+ args[0]->ToObject()->Get(Symbols::length), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
init_from_array = true;
} else {
@@ -497,7 +529,7 @@
byteOffset = 0;
Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
+ Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer);
ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
@@ -522,25 +554,22 @@
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'slice' invoked on non-object receiver"));
+ return Throw("'slice' invoked on non-object receiver");
}
Local<Object> self = args.This();
Local<Value> marker =
- self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
+ self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName);
if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'slice' invoked on wrong receiver type"));
+ return Throw("'slice' invoked on wrong receiver type");
}
int32_t length =
- convertToUint(self->Get(String::New("byteLength")), &try_catch);
+ convertToUint(self->Get(Symbols::byteLength), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return ThrowException(
- String::New("'slice' must have at least one argument"));
+ return Throw("'slice' must have at least one argument");
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -579,32 +608,29 @@
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'subarray' invoked on non-object receiver"));
+ return Throw("'subarray' invoked on non-object receiver");
}
Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ Local<Value> marker = self->GetHiddenValue(Symbols::ArrayMarkerPropName);
if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'subarray' invoked on wrong receiver type"));
+ return Throw("'subarray' invoked on wrong receiver type");
}
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ Handle<Object> buffer = self->Get(Symbols::buffer)->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t length =
- convertToUint(self->Get(String::New("length")), &try_catch);
+ convertToUint(self->Get(Symbols::length), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(self->Get(Symbols::byteOffset), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
- convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return ThrowException(
- String::New("'subarray' must have at least one argument"));
+ return Throw("'subarray' must have at least one argument");
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -639,35 +665,31 @@
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'set' invoked on non-object receiver"));
+ return Throw("'set' invoked on non-object receiver");
}
Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ Local<Value> marker = self->GetHiddenValue(Symbols::ArrayMarkerPropName);
if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'set' invoked on wrong receiver type"));
+ return Throw("'set' invoked on wrong receiver type");
}
int32_t length =
- convertToUint(self->Get(String::New("length")), &try_catch);
+ convertToUint(self->Get(Symbols::length), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
- convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return ThrowException(
- String::New("'set' must have at least one argument"));
+ return Throw("'set' must have at least one argument");
}
if (!args[0]->IsObject() ||
- !args[0]->ToObject()->Has(String::New("length"))) {
- return ThrowException(
- String::New("'set' invoked with non-array argument"));
+ !args[0]->ToObject()->Has(Symbols::length)) {
+ return Throw("'set' invoked with non-array argument");
}
Handle<Object> source = args[0]->ToObject();
int32_t source_length =
- convertToUint(source->Get(String::New("length")), &try_catch);
+ convertToUint(source->Get(Symbols::length), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t offset;
@@ -678,31 +700,31 @@
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (offset + source_length > length) {
- return ThrowException(String::New("offset or source length out of bounds"));
+ return Throw("offset or source length out of bounds");
}
int32_t source_element_size;
- if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
+ if (source->GetHiddenValue(Symbols::ArrayMarkerPropName).IsEmpty()) {
source_element_size = 0;
} else {
source_element_size =
- convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (element_size == source_element_size &&
self->GetConstructor()->StrictEquals(source->GetConstructor())) {
// Use memmove on the array buffers.
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ Handle<Object> buffer = self->Get(Symbols::buffer)->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
- source->Get(String::New("buffer"))->ToObject();
+ source->Get(Symbols::buffer)->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(self->Get(Symbols::byteOffset), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
- convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(source->Get(Symbols::byteOffset), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
@@ -718,10 +740,10 @@
}
} else {
// Need to copy element-wise to make the right conversions.
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ Handle<Object> buffer = self->Get(Symbols::buffer)->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
- source->Get(String::New("buffer"))->ToObject();
+ source->Get(Symbols::buffer)->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (buffer->StrictEquals(source_buffer)) {
@@ -729,10 +751,10 @@
// This gets a bit tricky in the case of different element sizes
// (which, of course, is extremely unlikely to ever occur in practice).
int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(self->Get(Symbols::byteOffset), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
- convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(source->Get(Symbols::byteOffset), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
// Copy as much as we can from left to right.
@@ -779,7 +801,7 @@
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
int32_t length =
- object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
+ object->ToObject()->Get(Symbols::byteLength)->Uint32Value();
V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
object.Dispose();
@@ -1165,7 +1187,7 @@
// Bind the handlers for external arrays.
PropertyAttribute attr =
static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(String::New("ArrayBuffer"),
+ global_template->Set(Symbols::ArrayBuffer,
CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
CreateArrayTemplate(Int8Array), attr);
@@ -1223,6 +1245,8 @@
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
#endif // V8_SHARED
+
+ Symbols::Initialize();
if (options.test_shell) return;
#ifndef V8_SHARED
@@ -1381,15 +1405,15 @@
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == NULL) {
- return ThrowException(String::New("Error reading file"));
+ return Throw("Error reading file");
}
Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+ buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName, True());
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
@@ -1397,7 +1421,7 @@
buffer->SetIndexedPropertiesToExternalArrayData(
data, kExternalUnsignedByteArray, length);
- buffer->Set(String::New("byteLength"),
+ buffer->Set(Symbols::byteLength,
Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
}
@@ -1895,6 +1919,7 @@
RunShell();
}
+ Symbols::TearDown();
V8::Dispose();
#ifndef V8_SHARED
diff --git a/src/date.js b/src/date.js
index a54cb23..c75d12c 100644
--- a/src/date.js
+++ b/src/date.js
@@ -107,7 +107,7 @@
}
// Now we rely on year and month being SMIs.
- return %DateMakeDay(year, month) + date - 1;
+ return %DateMakeDay(year | 0, month | 0) + date - 1;
}
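Note: the bitwise-or with zero truncates to int32, so after this change
%DateMakeDay receives integer arguments even when the surrounding arithmetic
produced heap numbers, which is exactly the Smi assumption the preceding
comment states.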
diff --git a/src/elements.cc b/src/elements.cc
index 8cb48c6..0f4d533 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -146,13 +146,13 @@
}
-void CopyObjectToObjectElements(FixedArray* from,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArray* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
+static void CopyObjectToObjectElements(FixedArray* from,
+ ElementsKind from_kind,
+ uint32_t from_start,
+ FixedArray* to,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -586,6 +586,49 @@
return backing_store->is_the_hole(key) ? ABSENT : NONE;
}
+ MUST_USE_RESULT virtual PropertyType GetType(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetTypeImpl(
+ receiver, holder, key, BackingStore::cast(backing_store));
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
+ if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
+ return NONEXISTENT;
+ }
+ return backing_store->is_the_hole(key) ? NONEXISTENT : FIELD;
+ }
+
+ MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetAccessorPairImpl(
+ receiver, holder, key, BackingStore::cast(backing_store));
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
+ return NULL;
+ }
+
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
@@ -653,7 +696,7 @@
}
}
}
- if (from->length() == 0) {
+ if (from->length() == 0 || copy_size == 0) {
return from;
}
return ElementsAccessorSubclass::CopyElementsImpl(
@@ -1172,7 +1215,17 @@
BackingStore* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? NONE : ABSENT;
+ ? NONE : ABSENT;
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
+ return
+ key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ ? FIELD : NONEXISTENT;
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
@@ -1475,6 +1528,32 @@
return ABSENT;
}
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ SeededNumberDictionary* backing_store) {
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ return backing_store->DetailsAt(entry).type();
+ }
+ return NONEXISTENT;
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound &&
+ backing_store->DetailsAt(entry).type() == CALLBACKS &&
+ backing_store->ValueAt(entry)->IsAccessorPair()) {
+ return AccessorPair::cast(backing_store->ValueAt(entry));
+ }
+ return NULL;
+ }
+
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
@@ -1550,6 +1629,38 @@
}
}
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArray* parameter_map) {
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return FIELD;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetType(
+ receiver, obj, key, arguments);
+ }
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArray* parameter_map) {
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return NULL;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
+ receiver, obj, key, arguments);
+ }
+ }
+
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
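
GetType and GetAccessorPair follow the file's existing dispatch pattern: a virtual entry point on the templated base normalizes the backing store, then forwards to ElementsAccessorSubclass::...Impl, which a concrete accessor customizes by shadowing the static method rather than overriding the virtual. A stripped-down sketch of that CRTP scheme (names and types here are illustrative only):

    #include <cstdio>

    struct Accessor {                    // role of ElementsAccessor
      virtual ~Accessor() {}
      virtual int GetType(int key) = 0;
    };

    template <typename Subclass>
    struct AccessorBase : Accessor {     // role of ElementsAccessorBase
      virtual int GetType(int key) {
        // Statically bound: picks the most-derived GetTypeImpl at
        // compile time, so no second virtual dispatch is needed.
        return Subclass::GetTypeImpl(key);
      }
      static int GetTypeImpl(int) { return 0; }   // default: NONEXISTENT
    };

    struct FastAccessor : AccessorBase<FastAccessor> {
      static int GetTypeImpl(int key) { return key < 4 ? 1 : 0; }  // FIELD
    };

    int main() {
      FastAccessor fast;
      Accessor* a = &fast;
      std::printf("%d %d\n", a->GetType(2), a->GetType(9));  // prints: 1 0
      return 0;
    }
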
diff --git a/src/elements.h b/src/elements.h
index 8a83f0f..ffd6428 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -82,6 +82,28 @@
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
+ // Returns an element's type, or NONEXISTENT if there is no such
+ // element. This method doesn't iterate up the prototype chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+ // backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual PropertyType GetType(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
+ // Returns an element's accessors, or NULL if the element does not exist or
+ // is plain. This method doesn't iterate up the prototype chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+ // backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@@ -175,16 +197,6 @@
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
-
-void CopyObjectToObjectElements(FixedArray* from_obj,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArray* to_obj,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size);
-
-
} } // namespace v8::internal
#endif // V8_ELEMENTS_H_
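
Given these declarations, a caller can query an own element's metadata without walking the prototype chain; omitting the backing store makes the accessor default to holder->elements(). A hedged usage sketch, assuming the codebase's usual way of obtaining the accessor for an object's ElementsKind:

    // Illustrative only: GetElementsAccessor() is assumed to return the
    // accessor matching holder's ElementsKind.
    ElementsAccessor* accessor = holder->GetElementsAccessor();
    PropertyType type = accessor->GetType(receiver, holder, key);
    if (type == CALLBACKS) {
      // Non-NULL only for dictionary elements holding an AccessorPair.
      AccessorPair* pair = accessor->GetAccessorPair(receiver, holder, key);
      if (pair != NULL) {
        // pair->getter() / pair->setter() are the accessor functions.
      }
    }
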
diff --git a/src/execution.cc b/src/execution.cc
index 89091ba..8942fb3 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -937,7 +937,8 @@
}
stack_guard->Continue(CODE_READY);
}
- if (!stack_guard->IsTerminateExecution()) {
+ if (!stack_guard->IsTerminateExecution() &&
+ !FLAG_manual_parallel_recompilation) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
diff --git a/src/factory.cc b/src/factory.cc
index 32a1755..b5f6c56 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -178,7 +178,7 @@
}
-Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+Handle<String> Factory::LookupAsciiSymbol(Handle<SeqOneByteString> string,
int from,
int length) {
CALL_HEAP_FUNCTION(isolate(),
@@ -222,12 +222,12 @@
}
-Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
+Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawAsciiString(length, pretenure),
- SeqAsciiString);
+ SeqOneByteString);
}
@@ -968,6 +968,7 @@
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
+ elements->length(),
pretenure),
JSArray);
}
@@ -1366,7 +1367,7 @@
// Check to see whether there is a matching element in the cache.
Handle<MapCache> cache =
Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
+ Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
if (result->IsMap()) return Handle<Map>::cast(result);
// Create a new map and add it to the cache.
Handle<Map> map =
@@ -1418,7 +1419,7 @@
bool* pending_exception) {
// Configure the instance by adding the properties specified by the
// instance template.
- Handle<Object> instance_template = Handle<Object>(desc->instance_template());
+ Handle<Object> instance_template(desc->instance_template(), isolate());
if (!instance_template->IsUndefined()) {
Execution::ConfigureInstance(instance,
instance_template,
diff --git a/src/factory.h b/src/factory.h
index 95a33f9..dd613b7 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -82,7 +82,7 @@
Handle<String> LookupSymbol(Vector<const char> str);
Handle<String> LookupSymbol(Handle<String> str);
Handle<String> LookupAsciiSymbol(Vector<const char> str);
- Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+ Handle<String> LookupAsciiSymbol(Handle<SeqOneByteString>,
int from,
int length);
Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
@@ -130,7 +130,7 @@
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- Handle<SeqAsciiString> NewRawAsciiString(
+ Handle<SeqOneByteString> NewRawOneByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
Handle<SeqTwoByteString> NewRawTwoByteString(
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 694dbea..69692c4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -225,7 +225,7 @@
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
DEFINE_bool(opt_safe_uint32_operations, true,
- "allow uint32 values on optimize frames if they are used only in"
+ "allow uint32 values on optimize frames if they are used only in "
"safe operations")
DEFINE_bool(parallel_recompilation, false,
@@ -233,6 +233,9 @@
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
DEFINE_int(parallel_recompilation_queue_length, 2,
"the length of the parallel compilation queue")
+DEFINE_bool(manual_parallel_recompilation, false,
+ "disable automatic optimization")
+DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -346,6 +349,10 @@
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_period, 1000,
+ "CPU profiler sampling period in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
@@ -438,6 +445,9 @@
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
+DEFINE_bool(use_marking_progress_bar, false,
+ "Use a progress bar to scan large objects in increments when "
+ "incremental marking is active.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
diff --git a/src/handles.h b/src/handles.h
index b80dbe5..3b0d2f7 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -97,8 +97,8 @@
// Convenience wrapper.
template<class T>
-inline Handle<T> handle(T* t) {
- return Handle<T>(t);
+inline Handle<T> handle(T* t, Isolate* isolate) {
+ return Handle<T>(t, isolate);
}
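
Call sites of the handle() helper change accordingly, threading the isolate through explicitly instead of relying on an implicit current-isolate lookup:

    // Before this patch:  Handle<Object> h = handle(raw_object);
    // After this patch, the caller supplies its isolate:
    Handle<Object> h = handle(raw_object, isolate);
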
diff --git a/src/heap-inl.h b/src/heap-inl.h
index e038453..0bdf522 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -109,12 +109,12 @@
MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
uint32_t hash_field) {
- if (str.length() > SeqAsciiString::kMaxLength) {
+ if (str.length() > SeqOneByteString::kMaxLength) {
return Failure::OutOfMemoryException();
}
// Compute map and object size.
Map* map = ascii_symbol_map();
- int size = SeqAsciiString::SizeFor(str.length());
+ int size = SeqOneByteString::SizeFor(str.length());
// Allocate string.
Object* result;
@@ -134,7 +134,7 @@
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqAsciiString::kHeaderSize,
+ memcpy(answer->address() + SeqOneByteString::kHeaderSize,
str.start(), str.length());
return answer;
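
SeqOneByteString::SizeFor(length) used above is header-plus-payload rounded up to object alignment. A worked sketch with the usual 32-bit constants (the exact header layout here is an assumption for illustration):

    #include <cstdio>

    const int kPointerSize = 4;                 // 32-bit heap assumed
    const int kHeaderSize = 3 * kPointerSize;   // map + hash + length

    // Round header + one byte per character up to pointer alignment.
    int SizeFor(int length) {
      int unaligned = kHeaderSize + length;
      return (unaligned + kPointerSize - 1) & ~(kPointerSize - 1);
    }

    int main() {
      std::printf("%d %d\n", SizeFor(1), SizeFor(5));  // prints: 16 20
      return 0;
    }
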
diff --git a/src/heap.cc b/src/heap.cc
index 5e760c3..307960b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -137,6 +137,7 @@
tracer_(NULL),
young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
+ low_survival_rate_period_length_(0),
survival_rate_(0),
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
@@ -1763,7 +1764,7 @@
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
- table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
+ table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
@@ -2007,11 +2008,11 @@
}
- static inline void EvacuateSeqAsciiString(Map* map,
+ static inline void EvacuateSeqOneByteString(Map* map,
HeapObject** slot,
HeapObject* object) {
- int object_size = SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
+ int object_size = SeqOneByteString::cast(object)->
+ SeqOneByteStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -3353,7 +3354,7 @@
{ MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ char* dest = SeqOneByteString::cast(result)->GetChars();
dest[0] = c1;
dest[1] = c2;
return result;
@@ -3427,20 +3428,20 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ char* dest = SeqOneByteString::cast(result)->GetChars();
// Copy first part.
const char* src;
if (first->IsExternalString()) {
src = ExternalAsciiString::cast(first)->GetChars();
} else {
- src = SeqAsciiString::cast(first)->GetChars();
+ src = SeqOneByteString::cast(first)->GetChars();
}
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
if (second->IsExternalString()) {
src = ExternalAsciiString::cast(second)->GetChars();
} else {
- src = SeqAsciiString::cast(second)->GetChars();
+ src = SeqOneByteString::cast(second)->GetChars();
}
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
@@ -3451,7 +3452,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ char* dest = SeqOneByteString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
@@ -3528,7 +3529,7 @@
// Copy the characters into the new object.
if (is_ascii) {
ASSERT(string_result->IsAsciiRepresentation());
- char* dest = SeqAsciiString::cast(string_result)->GetChars();
+ char* dest = SeqOneByteString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
} else {
ASSERT(string_result->IsTwoByteRepresentation());
@@ -3787,7 +3788,7 @@
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
+ code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
@@ -4181,7 +4182,7 @@
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
+ ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -4246,7 +4247,7 @@
FixedArrayBase* elms;
MaybeObject* maybe_elms = NULL;
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
@@ -4273,13 +4274,14 @@
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure) {
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
array->set_elements(elements);
- array->set_length(Smi::FromInt(elements->length()));
+ array->set_length(Smi::FromInt(length));
array->ValidateElements();
return array;
}
@@ -4570,7 +4572,7 @@
}
// Copy the characters into the new object.
- CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
+ CopyChars(SeqOneByteString::cast(result)->GetChars(), string.start(), length);
return result;
}
@@ -4625,7 +4627,7 @@
if (String::IsAscii(start, length)) {
MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
+ CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
} else { // It's not an ASCII string.
MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4680,11 +4682,11 @@
Map* map;
if (is_ascii) {
- if (chars > SeqAsciiString::kMaxLength) {
+ if (chars > SeqOneByteString::kMaxLength) {
return Failure::OutOfMemoryException();
}
map = ascii_symbol_map();
- size = SeqAsciiString::SizeFor(chars);
+ size = SeqOneByteString::SizeFor(chars);
} else {
if (chars > SeqTwoByteString::kMaxLength) {
return Failure::OutOfMemoryException();
@@ -4725,12 +4727,12 @@
MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- if (length < 0 || length > SeqAsciiString::kMaxLength) {
+ if (length < 0 || length > SeqOneByteString::kMaxLength) {
return Failure::OutOfMemoryException();
}
- int size = SeqAsciiString::SizeFor(length);
- ASSERT(size <= SeqAsciiString::kMaxSize);
+ int size = SeqOneByteString::SizeFor(length);
+ ASSERT(size <= SeqOneByteString::kMaxSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
@@ -4762,7 +4764,7 @@
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ char* dest = SeqOneByteString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif
@@ -5623,7 +5625,7 @@
}
-MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqOneByteString> string,
int from,
int length) {
Object* symbol = NULL;
@@ -7227,6 +7229,7 @@
} else {
PrintF("stepscount=%d ", steps_count_);
PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+ PrintF("longeststep=%.f ", longest_step_);
}
PrintF("\n");
diff --git a/src/heap.h b/src/heap.h
index eecb436..c6b3794 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -584,6 +584,7 @@
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
@@ -1044,9 +1045,8 @@
return LookupSymbol(CStrVector(str));
}
MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
- int from,
- int length);
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(
+ Handle<SeqOneByteString> string, int from, int length);
bool LookupSymbolIfExists(String* str, String** symbol);
bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index feac1be..ec23e19 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -85,6 +85,81 @@
}
+void HValue::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HValue::RepresentationFromUses() {
+ if (HasNoUses()) return Representation::None();
+
+ // Array of use counts for each representation.
+ int use_count[Representation::kNumRepresentations] = { 0 };
+
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ Representation rep = use->observed_input_representation(it.index());
+ if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("#%d %s is used by #%d %s as %s%s\n",
+ id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
+ (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+ }
+ use_count[rep.kind()] += use->LoopWeight();
+ }
+ if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
+ int tagged_count = use_count[Representation::kTagged];
+ int double_count = use_count[Representation::kDouble];
+ int int32_count = use_count[Representation::kInteger32];
+
+ if (tagged_count > 0) return Representation::Tagged();
+ if (double_count > 0) return Representation::Double();
+ if (int32_count > 0) return Representation::Integer32();
+
+ return Representation::None();
+}
+
+
+void HValue::UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ Representation r = representation();
+ if (new_rep.is_more_general_than(r)) {
+ // When an HConstant is marked "not convertible to integer", then
+ // never try to represent it as an integer.
+ if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
+ new_rep = Representation::Tagged();
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
+ " (%s want i)\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ } else {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ }
+ ChangeRepresentation(new_rep);
+ AddDependantsToWorklist(h_infer);
+ }
+}
+
+
+void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ h_infer->AddToWorklist(it.value());
+ }
+ for (int i = 0; i < OperandCount(); ++i) {
+ h_infer->AddToWorklist(OperandAt(i));
+ }
+}
+
+
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
@@ -301,6 +376,7 @@
bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
}
return true;
@@ -764,6 +840,24 @@
}
+Representation HBranch::observed_input_representation(int index) {
+ static const ToBooleanStub::Types tagged_types(
+ ToBooleanStub::UNDEFINED |
+ ToBooleanStub::NULL_TYPE |
+ ToBooleanStub::SPEC_OBJECT |
+ ToBooleanStub::STRING);
+ if (expected_input_types_.ContainsAnyOf(tagged_types)) {
+ return Representation::Tagged();
+ } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ return Representation::Double();
+ } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ return Representation::Integer32();
+ } else {
+ return Representation::None();
+ }
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
@@ -1339,15 +1433,11 @@
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->ObservedInputRepresentation(it.index());
+ Representation rep = value->observed_input_representation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- this->id(),
- this->Mnemonic(),
- value->id(),
- value->Mnemonic(),
- rep.Mnemonic());
+ PrintF("#%d Phi is used by real #%d %s as %s\n",
+ id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
}
}
@@ -1356,11 +1446,8 @@
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
- this->id(),
- this->Mnemonic(),
- other->id(),
- other->Mnemonic(),
+ PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
+ id(), other->id(),
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@@ -1379,9 +1466,20 @@
}
-void HPhi::ResetInteger32Uses() {
- non_phi_uses_[Representation::kInteger32] = 0;
- indirect_uses_[Representation::kInteger32] = 0;
+void HSimulate::MergeInto(HSimulate* other) {
+ for (int i = 0; i < values_.length(); ++i) {
+ HValue* value = values_[i];
+ if (HasAssignedIndexAt(i)) {
+ other->AddAssignedValue(GetAssignedIndexAt(i), value);
+ } else {
+ if (other->pop_count_ > 0) {
+ other->pop_count_--;
+ } else {
+ other->AddPushedValue(value);
+ }
+ }
+ }
+ other->pop_count_ += pop_count();
}
@@ -1390,7 +1488,7 @@
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
- for (int i = 0; i < values_.length(); ++i) {
+ for (int i = values_.length() - 1; i >= 0; --i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
@@ -1429,7 +1527,6 @@
: handle_(handle),
has_int32_value_(false),
has_double_value_(false) {
- set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
@@ -1438,6 +1535,16 @@
double_value_ = n;
has_double_value_ = true;
}
+ if (r.IsNone()) {
+ if (has_int32_value_) {
+ r = Representation::Integer32();
+ } else if (has_double_value_) {
+ r = Representation::Double();
+ } else {
+ r = Representation::Tagged();
+ }
+ }
+ set_representation(r);
}
@@ -1536,6 +1643,52 @@
}
+void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ // When the operation has information about its own output type, don't look
+ // at uses.
+ if (!observed_output_representation_.IsNone()) return;
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HBinaryOperation::RepresentationFromInputs() {
+ // Determine the worst case of observed input representations and
+ // the currently assumed output representation.
+ Representation rep = representation();
+ if (observed_output_representation_.is_more_general_than(rep)) {
+ rep = observed_output_representation_;
+ }
+ for (int i = 1; i <= 2; ++i) {
+ Representation input_rep = observed_input_representation(i);
+ if (input_rep.is_more_general_than(rep)) rep = input_rep;
+ }
+ // If any of the actual input representations is more general than what we
+ // have so far but not Tagged, use that representation instead.
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+
+ if (left_rep.is_more_general_than(rep) &&
+ left()->CheckFlag(kFlexibleRepresentation)) {
+ rep = left_rep;
+ }
+ if (right_rep.is_more_general_than(rep) &&
+ right()->CheckFlag(kFlexibleRepresentation)) {
+ rep = right_rep;
+ }
+ return rep;
+}
+
+
+void HBinaryOperation::AssumeRepresentation(Representation r) {
+ set_observed_input_representation(r, r);
+ HValue::AssumeRepresentation(r);
+}
+
+
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
@@ -1667,9 +1820,19 @@
}
-void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
- input_representation_ = r;
- if (r.IsDouble()) {
+void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
+ Representation rep = Representation::None();
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ bool observed_integers =
+ observed_input_representation(0).IsInteger32() &&
+ observed_input_representation(1).IsInteger32();
+ bool inputs_are_not_doubles =
+ !left_rep.IsDouble() && !right_rep.IsDouble();
+ if (observed_integers && inputs_are_not_doubles) {
+ rep = Representation::Integer32();
+ } else {
+ rep = Representation::Double();
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
@@ -1686,9 +1849,8 @@
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
}
- } else {
- ASSERT(r.IsInteger32());
}
+ ChangeRepresentation(rep);
}
@@ -2451,7 +2613,41 @@
}
-Representation HPhi::InferredRepresentation() {
+void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ // If there are non-Phi uses, and all of them have observed the same
+ // representation, then that's what this Phi is going to use.
+ Representation new_rep = RepresentationObservedByAllNonPhiUses();
+ if (!new_rep.IsNone()) {
+ UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
+ return;
+ }
+ new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ new_rep = RepresentationFromUseRequirements();
+ UpdateRepresentation(new_rep, h_infer, "use requirements");
+}
+
+
+Representation HPhi::RepresentationObservedByAllNonPhiUses() {
+ int non_phi_use_count = 0;
+ for (int i = Representation::kInteger32;
+ i < Representation::kNumRepresentations; ++i) {
+ non_phi_use_count += non_phi_uses_[i];
+ }
+ if (non_phi_use_count <= 1) return Representation::None();
+ for (int i = 0; i < Representation::kNumRepresentations; ++i) {
+ if (non_phi_uses_[i] == non_phi_use_count) {
+ return Representation::FromKind(static_cast<Representation::Kind>(i));
+ }
+ }
+ return Representation::None();
+}
+
+
+Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
@@ -2460,6 +2656,7 @@
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
+ if (hint.IsTagged()) return hint;
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
@@ -2478,7 +2675,9 @@
return Representation::Tagged();
}
} else {
- return Representation::Tagged();
+ if (value->IsPhi() && !IsConvertibleToInteger()) {
+ return Representation::Tagged();
+ }
}
}
}
@@ -2491,6 +2690,37 @@
}
+Representation HPhi::RepresentationFromUseRequirements() {
+ Representation all_uses_require = Representation::None();
+ bool all_uses_require_the_same = true;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ // We check for observed_input_representation elsewhere.
+ Representation use_rep =
+ it.value()->RequiredInputRepresentation(it.index());
+ // No useful info from this use -> look at the next one.
+ if (use_rep.IsNone()) {
+ continue;
+ }
+ if (use_rep.Equals(all_uses_require)) {
+ continue;
+ }
+ // This use's representation contradicts what we've seen so far.
+ if (!all_uses_require.IsNone()) {
+ ASSERT(!use_rep.Equals(all_uses_require));
+ all_uses_require_the_same = false;
+ break;
+ }
+ // Otherwise, initialize observed representation.
+ all_uses_require = use_rep;
+ }
+ if (all_uses_require_the_same) {
+ return all_uses_require;
+ }
+
+ return Representation::None();
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
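
RepresentationFromUses above is a weighted vote: each use reports the representation it observed for this operand, weights come from loop depth, and any nonzero count for a more general kind wins outright (Tagged over Double over Integer32). A toy model of that tally, with the enum mirroring the reordered lattice in the header below:

    #include <cstdio>

    enum Kind { kNone, kInteger32, kDouble, kTagged, kNumKinds };

    // Toy HValue::RepresentationFromUses(): accumulate weighted
    // observations, then prefer the most general kind seen at all.
    Kind FromUses(const Kind* observed, const int* weight, int n) {
      int use_count[kNumKinds] = { 0 };
      for (int i = 0; i < n; ++i) use_count[observed[i]] += weight[i];
      if (use_count[kTagged] > 0) return kTagged;
      if (use_count[kDouble] > 0) return kDouble;
      if (use_count[kInteger32] > 0) return kInteger32;
      return kNone;
    }

    int main() {
      Kind obs[] = { kInteger32, kDouble, kNone };
      int w[] = { 10, 1, 1 };  // loop weights
      // One double observation outvotes ten int32 ones: prints 2 (kDouble).
      std::printf("%d\n", FromUses(obs, w, 3));
      return 0;
    }
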
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index d118354..e529078 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -45,6 +45,7 @@
// Forward declarations.
class HBasicBlock;
class HEnvironment;
+class HInferRepresentation;
class HInstruction;
class HLoopInformation;
class HValue;
@@ -308,9 +309,9 @@
public:
enum Kind {
kNone,
- kTagged,
- kDouble,
kInteger32,
+ kDouble,
+ kTagged,
kExternal,
kNumRepresentations
};
@@ -323,10 +324,18 @@
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
bool Equals(const Representation& other) {
return kind_ == other.kind_;
}
+ bool is_more_general_than(const Representation& other) {
+ ASSERT(kind_ != kExternal);
+ ASSERT(other.kind_ != kExternal);
+ return kind_ > other.kind_;
+ }
+
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
@@ -629,13 +638,15 @@
virtual bool EmitAtUses() { return false; }
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
- // Representation was already set and is allowed to be changed.
- ASSERT(!r.IsNone());
ASSERT(CheckFlag(kFlexibleRepresentation));
RepresentationChanged(r);
representation_ = r;
+ if (r.IsTagged()) {
+ // Tagged is the bottom of the lattice, don't go any further.
+ ClearFlag(kFlexibleRepresentation);
+ }
}
- void AssumeRepresentation(Representation r);
+ virtual void AssumeRepresentation(Representation r);
virtual bool IsConvertibleToInteger() const { return true; }
@@ -733,16 +744,11 @@
void ComputeInitialRange(Zone* zone);
// Representation helpers.
+ virtual Representation observed_input_representation(int index) {
+ return Representation::None();
+ }
virtual Representation RequiredInputRepresentation(int index) = 0;
-
- virtual Representation InferredRepresentation() {
- return representation();
- }
-
- // Type feedback access.
- virtual Representation ObservedInputRepresentation(int index) {
- return RequiredInputRepresentation(index);
- }
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
@@ -790,7 +796,18 @@
UNREACHABLE();
return false;
}
+
+ virtual Representation RepresentationFromInputs() {
+ return representation();
+ }
+ Representation RepresentationFromUses();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason);
+ void AddDependantsToWorklist(HInferRepresentation* h_infer);
+
virtual void RepresentationChanged(Representation to) { }
+
virtual Range* InferRange(Zone* zone);
virtual void DeleteFromGraph() = 0;
virtual void InternalSetOperandAt(int index, HValue* value) = 0;
@@ -800,7 +817,6 @@
}
void set_representation(Representation r) {
- // Representation is set-once.
ASSERT(representation_.IsNone() && !r.IsNone());
representation_ = r;
}
@@ -1113,6 +1129,7 @@
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ virtual Representation observed_input_representation(int index);
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1317,14 +1334,24 @@
};
+enum RemovableSimulate {
+ REMOVABLE_SIMULATE,
+ FIXED_SIMULATE
+};
+
+
class HSimulate: public HInstruction {
public:
- HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
+ HSimulate(BailoutId ast_id,
+ int pop_count,
+ Zone* zone,
+ RemovableSimulate removable)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
assigned_indexes_(2, zone),
- zone_(zone) {}
+ zone_(zone),
+ removable_(removable) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
@@ -1358,6 +1385,9 @@
return Representation::None();
}
+ void MergeInto(HSimulate* other);
+ bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
+
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
@@ -1384,6 +1414,7 @@
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
+ RemovableSimulate removable_;
};
@@ -2010,6 +2041,9 @@
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Integer32();
+ }
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
@@ -2037,7 +2071,7 @@
set_representation(Representation::Integer32());
break;
case kMathAbs:
- set_representation(Representation::Tagged());
+ // Not setting representation here: it is None intentionally.
SetFlag(kFlexibleRepresentation);
SetGVNFlag(kChangesNewSpacePromotion);
break;
@@ -2217,6 +2251,7 @@
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
@@ -2444,13 +2479,15 @@
indirect_uses_[i] = 0;
}
ASSERT(merged_index >= 0);
- set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
}
- virtual Representation InferredRepresentation();
+ virtual Representation RepresentationFromInputs();
virtual Range* InferRange(Zone* zone);
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+ Representation RepresentationObservedByAllNonPhiUses();
+ Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
@@ -2514,14 +2551,17 @@
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
if (!OperandAt(i)->IsConvertibleToInteger()) {
+ if (FLAG_trace_representation) {
+ HValue* input = OperandAt(i);
+ PrintF("#%d %s: Input #%d %s at %d is NCTI\n",
+ id(), Mnemonic(), input->id(), input->Mnemonic(), i);
+ }
return false;
}
}
return true;
}
- void ResetInteger32Uses();
-
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -2704,11 +2744,14 @@
class HBinaryOperation: public HTemplateInstruction<3> {
public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right) {
+ HBinaryOperation(HValue* context, HValue* left, HValue* right)
+ : observed_output_representation_(Representation::None()) {
ASSERT(left != NULL && right != NULL);
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
+ observed_input_representation_[0] = Representation::None();
+ observed_input_representation_[1] = Representation::None();
}
HValue* context() { return OperandAt(0); }
@@ -2727,11 +2770,34 @@
return right();
}
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
+ }
+
+ virtual void initialize_output_representation(Representation observed) {
+ observed_output_representation_ = observed;
+ }
+
+ virtual Representation observed_input_representation(int index) {
+ if (index == 0) return Representation::Tagged();
+ return observed_input_representation_[index - 1];
+ }
+
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual Representation RepresentationFromInputs();
+ virtual void AssumeRepresentation(Representation r);
+
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
+
+ private:
+ Representation observed_input_representation_[2];
+ Representation observed_output_representation_;
};
@@ -2901,6 +2967,9 @@
}
return Representation::Integer32();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Integer32();
+ }
virtual void PrintDataTo(StringStream* stream);
@@ -2919,12 +2988,9 @@
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
+ SetFlag(kTruncatingToInt32);
SetAllSideEffects();
- observed_input_representation_[0] = Representation::Tagged();
- observed_input_representation_[1] = Representation::None();
- observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2937,28 +3003,32 @@
if (!to.IsTagged()) {
ASSERT(to.IsInteger32());
ClearAllSideEffects();
- SetFlag(kTruncatingToInt32);
SetFlag(kUseGVN);
+ } else {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
}
}
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+ HValue::UpdateRepresentation(new_rep, h_infer, reason);
+ }
+
+ virtual void initialize_output_representation(Representation observed) {
+ if (observed.IsDouble()) observed = Representation::Integer32();
+ HBinaryOperation::initialize_output_representation(observed);
+ }
+
virtual HType CalculateInferredType();
- virtual Representation ObservedInputRepresentation(int index) {
- return observed_input_representation_[index];
- }
-
- void InitializeObservedInputRepresentation(Representation r) {
- observed_input_representation_[1] = r;
- observed_input_representation_[2] = r;
- }
-
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
virtual bool IsDeletable() const { return true; }
-
- Representation observed_input_representation_[3];
};
@@ -2991,13 +3061,15 @@
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
+ SetFlag(kFlexibleRepresentation);
}
virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
+ if (to.IsTagged()) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
@@ -3010,13 +3082,6 @@
: representation();
}
- virtual Representation InferredRepresentation() {
- if (left()->representation().Equals(right()->representation())) {
- return left()->representation();
- }
- return HValue::InferredRepresentation();
- }
-
private:
virtual bool IsDeletable() const { return true; }
};
@@ -3035,11 +3100,9 @@
}
virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- Representation GetInputRepresentation() const {
- return Representation::Tagged();
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
}
Token::Value token() const { return token_; }
@@ -3058,6 +3121,7 @@
public:
HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
+ SetFlag(kFlexibleRepresentation);
ASSERT(Token::IsCompareOp(token));
SetOperandAt(0, left);
SetOperandAt(1, right);
@@ -3067,20 +3131,26 @@
HValue* right() { return OperandAt(1); }
Token::Value token() const { return token_; }
- void SetInputRepresentation(Representation r);
- Representation GetInputRepresentation() const {
- return input_representation_;
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
}
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+
virtual Representation RequiredInputRepresentation(int index) {
- return input_representation_;
+ return representation();
+ }
+ virtual Representation observed_input_representation(int index) {
+ return observed_input_representation_[index];
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
private:
- Representation input_representation_;
+ Representation observed_input_representation_[2];
Token::Value token_;
};
@@ -3141,6 +3211,9 @@
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
@@ -3418,6 +3491,9 @@
? Representation::Double()
: Representation::None();
}
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
DECLARE_CONCRETE_INSTRUCTION(Power)
@@ -3603,14 +3679,19 @@
operation_(op) { }
virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
+ return index == 0 ? Representation::Tagged()
+ : representation();
+ }
- virtual Representation InferredRepresentation() {
- if (left()->representation().IsInteger32() &&
- right()->representation().IsInteger32()) {
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
+ virtual Representation RepresentationFromInputs() {
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ if ((left_rep.IsNone() || left_rep.IsInteger32()) &&
+ (right_rep.IsNone() || right_rep.IsInteger32())) {
return Representation::Integer32();
}
return Representation::Double();
@@ -4336,6 +4417,10 @@
return Representation::None();
}
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
virtual void PrintDataTo(StringStream* stream);
bool RequiresHoleCheck() const;
@@ -4527,6 +4612,12 @@
} else {
SetGVNFlag(kChangesArrayElements);
}
+
+ // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
+ if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
+ elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ SetFlag(kTruncatingToInt32);
+ }
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4552,6 +4643,19 @@
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+
+ virtual Representation observed_input_representation(int index) {
+ if (index < 2) return RequiredInputRepresentation(index);
+ if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return Representation::Double();
+ }
+ if (is_external()) {
+ return Representation::Integer32();
+ }
+ // For fast object elements kinds, don't assume anything.
+ return Representation::None();
+ }
+
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
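
The Kind reordering above is what makes is_more_general_than a single integer comparison, and it also bounds the inference fixpoint: a value's representation only ever moves toward Tagged, and ChangeRepresentation clears kFlexibleRepresentation once Tagged is reached, so the worklist must drain. In miniature:

    enum Kind { kNone, kInteger32, kDouble, kTagged };

    // Generality is now plain numeric order; since UpdateRepresentation
    // only accepts strictly more general kinds, each value changes at
    // most three times before bottoming out at kTagged.
    bool IsMoreGeneralThan(Kind a, Kind b) { return a > b; }
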
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 51ae7f5..da69d79 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -133,7 +133,8 @@
}
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
+ RemovableSimulate removable) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
@@ -142,8 +143,12 @@
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
- for (int i = push_count - 1; i >= 0; --i) {
+ HSimulate* instr =
+ new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
+ // Order of pushed values: newest (top of stack) first. This allows
+ // HSimulate::MergeInto() to easily append additional pushed values
+ // that are older (from further down the stack).
+ for (int i = 0; i < push_count; ++i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
@@ -1291,7 +1296,7 @@
void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->GetInputRepresentation().IsInteger32()) {
+ if (test->representation().IsInteger32()) {
Token::Value op = test->token();
if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
@@ -2239,32 +2244,8 @@
}
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
-
- private:
- Representation TryChange(HValue* current);
- void AddToWorklist(HValue* current);
- void InferBasedOnInputs(HValue* current);
- void AddDependantsToWorklist(HValue* current);
- void InferBasedOnUses(HValue* current);
-
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsSpecialization()) return;
+ if (current->representation().IsTagged()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
worklist_.Add(current, zone());
@@ -2272,105 +2253,6 @@
}
-// This method tries to specialize the representation type of the value
-// given as a parameter. The value is asked to infer its representation type
-// based on its inputs. If the inferred type is more specialized, then this
-// becomes the new representation type of the node.
-void HInferRepresentation::InferBasedOnInputs(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation inferred = current->InferredRepresentation();
- if (inferred.IsSpecialization()) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on inputs\n",
- current->id(),
- r.Mnemonic(),
- inferred.Mnemonic());
- }
- current->ChangeRepresentation(inferred);
- AddDependantsToWorklist(current);
- }
-}
-
-
-void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- AddToWorklist(it.value());
- }
- for (int i = 0; i < value->OperandCount(); ++i) {
- AddToWorklist(value->OperandAt(i));
- }
-}
-
-
-// This method calculates whether specializing the representation of the value
-// given as the parameter has a benefit in terms of less necessary type
-// conversions. If there is a benefit, then the representation of the value is
-// specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* value) {
- Representation r = value->representation();
- if (r.IsSpecialization() || value->HasNoUses()) return;
- ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
- Representation new_rep = TryChange(value);
- if (!new_rep.IsNone()) {
- if (!value->representation().Equals(new_rep)) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on uses\n",
- value->id(),
- r.Mnemonic(),
- new_rep.Mnemonic());
- }
- value->ChangeRepresentation(new_rep);
- AddDependantsToWorklist(value);
- }
- }
-}
-
-
-Representation HInferRepresentation::TryChange(HValue* value) {
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->ObservedInputRepresentation(it.index());
- if (rep.IsNone()) continue;
- if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- value->id(),
- value->Mnemonic(),
- use->id(),
- use->Mnemonic(),
- rep.Mnemonic());
- }
- if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
- use_count[rep.kind()] += use->LoopWeight();
- }
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
- int non_tagged_count = double_count + int32_count;
-
- // If a non-loop phi has tagged uses, don't convert it to untagged.
- if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) {
- return Representation::None();
- }
-
- // Prefer unboxing over boxing, the latter is more expensive.
- if (tagged_count > non_tagged_count) return Representation::None();
-
- // Prefer Integer32 over Double, if possible.
- if (int32_count > 0 && value->IsConvertibleToInteger()) {
- return Representation::Integer32();
- }
-
- if (double_count > 0) return Representation::Double();
-
- return Representation::None();
-}
-
-
void HInferRepresentation::Analyze() {
HPhase phase("H_Infer representations", graph_);
@@ -2421,7 +2303,6 @@
it.Advance()) {
HPhi* phi = phi_list->at(it.Current());
phi->set_is_convertible_to_integer(false);
- phi->ResetInteger32Uses();
}
}
@@ -2457,8 +2338,74 @@
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
in_worklist_.Remove(current->id());
- InferBasedOnInputs(current);
- InferBasedOnUses(current);
+ current->InferRepresentation(this);
+ }
+
+ // Lastly: any instruction that we don't have representation information
+ // for defaults to Tagged.
+ for (int i = 0; i < graph_->blocks()->length(); ++i) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ HPhi* phi = phis->at(j);
+ if (phi->representation().IsNone()) {
+ phi->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ for (HInstruction* current = block->first();
+ current != NULL; current = current->next()) {
+ if (current->representation().IsNone() &&
+ current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+ current->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ }
+}
+
+
+void HGraph::MergeRemovableSimulates() {
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HBasicBlock* block = blocks()->at(i);
+ // Always reset the folding candidate at the start of a block.
+ HSimulate* folding_candidate = NULL;
+ // Nasty heuristic: Never remove the first simulate in a block. This
+ // just so happens to have a beneficial effect on register allocation.
+ bool first = true;
+ for (HInstruction* current = block->first();
+ current != NULL; current = current->next()) {
+ if (current->IsLeaveInlined()) {
+ // Never fold simulates from inlined environments into simulates
+ // in the outer environment.
+ // (Before each HEnterInlined, there is a non-foldable HSimulate
+ // anyway, so we get the barrier in the other direction for free.)
+ if (folding_candidate != NULL) {
+ folding_candidate->DeleteAndReplaceWith(NULL);
+ }
+ folding_candidate = NULL;
+ continue;
+ }
+ // If we have an HSimulate and a candidate, perform the folding.
+ if (!current->IsSimulate()) continue;
+ if (first) {
+ first = false;
+ continue;
+ }
+ HSimulate* current_simulate = HSimulate::cast(current);
+ if (folding_candidate != NULL) {
+ folding_candidate->MergeInto(current_simulate);
+ folding_candidate->DeleteAndReplaceWith(NULL);
+ folding_candidate = NULL;
+ }
+ // Check if the current simulate is a candidate for folding.
+ if (current_simulate->previous()->HasObservableSideEffects() &&
+ !current_simulate->next()->IsSimulate()) {
+ continue;
+ }
+ if (!current_simulate->is_candidate_for_removal()) {
+ continue;
+ }
+ folding_candidate = current_simulate;
+ }
}
}
@@ -2553,7 +2500,6 @@
} else {
next = HInstruction::cast(use_value);
}
-
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
@@ -2565,7 +2511,7 @@
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
- new_value = is_truncating
+ new_value = (is_truncating && to.IsInteger32())
? constant->CopyToTruncatedInt32(zone())
: constant->CopyToRepresentation(to, zone());
}
@@ -2625,9 +2571,23 @@
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ // If a Phi is used as a non-truncating int32 or as a double,
+ // clear its "truncating" flag.
+ HValue* use = it.value();
+ Representation input_representation =
+ use->RequiredInputRepresentation(it.index());
+ if ((input_representation.IsInteger32() &&
+ !use->CheckFlag(HValue::kTruncatingToInt32)) ||
+ input_representation.IsDouble()) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ phi->id(), it.value()->id(), it.value()->Mnemonic());
+ }
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ change = true;
+ break;
+ }
}
}
}
@@ -2642,8 +2602,9 @@
// Process normal instructions.
HInstruction* current = blocks_[i]->first();
while (current != NULL) {
+ HInstruction* next = current->next();
InsertRepresentationChangesForValue(current);
- current = current->next();
+ current = next;
}
}
}
@@ -3038,7 +2999,9 @@
void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
}
@@ -3062,7 +3025,9 @@
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
}
@@ -3094,7 +3059,7 @@
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
- builder->AddSimulate(ast_id);
+ builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder->Pop();
}
BuildBranch(instr);
@@ -3272,9 +3237,8 @@
// optimization. Disable optimistic LICM in that case.
Handle<Code> unoptimized_code(info()->shared_info()->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
Handle<TypeFeedbackInfo> type_info(
- Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
int checksum = type_info->own_type_change_checksum();
int composite_checksum = graph()->update_type_change_checksum(checksum);
graph()->set_use_optimistic_licm(
@@ -3321,6 +3285,11 @@
HInferRepresentation rep(this);
rep.Analyze();
+ // Remove HSimulate instructions that have turned out not to be needed
+ // after all by folding them into the following HSimulate.
+ // This must happen after inferring representations.
+ MergeRemovableSimulates();
+
MarkDeoptimizeOnUndefined();
InsertRepresentationChanges();
@@ -3810,9 +3779,9 @@
}
-void HGraphBuilder::AddSimulate(BailoutId ast_id) {
+void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id);
+ current_block()->AddSimulate(ast_id, removable);
}
@@ -4169,7 +4138,7 @@
!clause->label()->IsStringLiteral()) ||
(switch_type == SMI_SWITCH &&
!clause->label()->IsSmiLiteral())) {
- return Bailout("SwitchStatemnt: mixed label types are not supported");
+ return Bailout("SwitchStatement: mixed label types are not supported");
}
}
@@ -4223,12 +4192,13 @@
new(zone()) HCompareIDAndBranch(tag_value,
label_value,
Token::EQ_STRICT);
- compare_->SetInputRepresentation(Representation::Integer32());
+ compare_->set_observed_input_representation(
+ Representation::Integer32(), Representation::Integer32());
compare = compare_;
} else {
compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
@@ -4587,7 +4557,8 @@
// Check that we still have more keys.
HCompareIDAndBranch* compare_index =
new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
- compare_index->SetInputRepresentation(Representation::Integer32());
+ compare_index->set_observed_input_representation(
+ Representation::Integer32(), Representation::Integer32());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
@@ -4862,7 +4833,7 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::Tagged());
+ new(zone()) HConstant(expr->handle(), Representation::None());
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5099,7 +5070,9 @@
map));
}
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(key->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(key->id(), REMOVABLE_SIMULATE);
+ }
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@@ -5480,10 +5453,10 @@
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(value);
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -5553,7 +5526,9 @@
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
return ast_context()->ReturnValue(Pop());
} else {
@@ -5570,7 +5545,7 @@
&has_side_effects);
Push(value);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
}
@@ -5592,7 +5567,9 @@
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -5606,7 +5583,7 @@
instr->set_position(position);
AddInstruction(instr);
ASSERT(instr->HasObservableSideEffects());
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
}
@@ -5683,7 +5660,7 @@
new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
@@ -5723,7 +5700,9 @@
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ if (load->HasObservableSideEffects()) {
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ }
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -5731,7 +5710,9 @@
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ }
HInstruction* store;
if (!monomorphic) {
@@ -5753,7 +5734,9 @@
// Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
return ast_context()->ReturnValue(Pop());
} else {
@@ -5769,7 +5752,7 @@
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -5778,7 +5761,9 @@
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ }
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -5790,7 +5775,7 @@
Drop(3);
Push(instr);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
@@ -5913,7 +5898,7 @@
context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
return ast_context()->ReturnValue(Pop());
}
@@ -6051,13 +6036,6 @@
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- if (!val->representation().IsInteger32()) {
- val = AddInstruction(new(zone()) HChange(
- val,
- Representation::Integer32(),
- true, // Truncate to int32.
- false)); // Don't deoptimize undefined (irrelevant here).
- }
break;
}
case EXTERNAL_FLOAT_ELEMENTS:
@@ -6678,10 +6656,10 @@
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(load);
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -7136,9 +7114,8 @@
inlined_count_ += nodes_added;
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
Handle<TypeFeedbackInfo> type_info(
- Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
TraceInline(target, caller, NULL);
@@ -7512,6 +7489,55 @@
}
+// Checks if all maps in |types| are from the same family, i.e., are elements
+// transitions of each other. Returns NULL if they are not, or else the map
+// from the list whose elements kind comes first in the family's transition
+// sequence.
+static Map* CheckSameElementsFamily(SmallMapList* types) {
+ if (types->length() <= 1) return NULL;
+ // Check if all maps belong to the same transition family.
+ Map* kinds[kFastElementsKindCount];
+ Map* first_map = *types->first();
+ ElementsKind first_kind = first_map->elements_kind();
+ if (!IsFastElementsKind(first_kind)) return NULL;
+ int first_index = GetSequenceIndexFromFastElementsKind(first_kind);
+ int last_index = first_index;
+
+ for (int i = 0; i < kFastElementsKindCount; i++) kinds[i] = NULL;
+
+ kinds[first_index] = first_map;
+
+ for (int i = 1; i < types->length(); ++i) {
+ Map* map = *types->at(i);
+ ElementsKind elements_kind = map->elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return NULL;
+ int index = GetSequenceIndexFromFastElementsKind(elements_kind);
+ if (index < first_index) {
+ first_index = index;
+ } else if (index > last_index) {
+ last_index = index;
+ } else if (kinds[index] != map) {
+ return NULL;
+ }
+ kinds[index] = map;
+ }
+
+ Map* current = kinds[first_index];
+ for (int i = first_index + 1; i <= last_index; i++) {
+ Map* next = kinds[i];
+ if (next != NULL) {
+ ElementsKind current_kind = next->elements_kind();
+ if (next != current->LookupElementsTransitionMap(current_kind)) {
+ return NULL;
+ }
+ current = next;
+ }
+ }
+
+ return kinds[first_index];
+}
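
A standalone model of the family check (illustrative three-kind sequence, a
direct transition table instead of LookupElementsTransitionMap, and slightly
simplified duplicate handling):

    #include <vector>

    // Maps form one family when each kind occurs at most once and each
    // later map is an elements transition of the previous one; the result
    // is the map with the earliest kind in the sequence.
    enum Kind { SMI = 0, DOUBLE = 1, OBJECT = 2, KIND_COUNT = 3 };

    struct MapModel {
      Kind kind;
      const MapModel* transition[KIND_COUNT] = {};  // by target kind
    };

    const MapModel* FirstOfFamily(const std::vector<const MapModel*>& maps) {
      if (maps.size() <= 1) return nullptr;
      const MapModel* by_kind[KIND_COUNT] = {};
      int first = KIND_COUNT, last = -1;
      for (const MapModel* m : maps) {
        int k = m->kind;
        if (by_kind[k] != nullptr && by_kind[k] != m) return nullptr;
        by_kind[k] = m;
        if (k < first) first = k;
        if (k > last) last = k;
      }
      const MapModel* current = by_kind[first];
      for (int k = first + 1; k <= last; ++k) {
        if (by_kind[k] == nullptr) continue;       // gaps are allowed
        if (current->transition[k] != by_kind[k]) return nullptr;
        current = by_kind[k];
      }
      return by_kind[first];
    }
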
+
+
void HGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7551,15 +7577,25 @@
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
-
SmallMapList* types = expr->GetReceiverTypes();
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> receiver_map;
+ if (monomorphic) {
+ receiver_map = (types == NULL || types->is_empty())
+ ? Handle<Map>::null()
+ : types->first();
+ } else {
+ Map* family_map = CheckSameElementsFamily(types);
+ if (family_map != NULL) {
+ receiver_map = Handle<Map>(family_map);
+ monomorphic = expr->ComputeTarget(receiver_map, name);
+ }
+ }
+
HValue* receiver =
environment()->ExpressionStackAt(expr->arguments()->length());
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_map = (types == NULL || types->is_empty())
- ? Handle<Map>::null()
- : types->first();
+ if (monomorphic) {
if (TryInlineBuiltinMethodCall(expr,
receiver,
receiver_map,
@@ -7604,7 +7640,7 @@
VariableProxy* proxy = expr->expression()->AsVariableProxy();
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
return Bailout("possible direct call to eval");
}
@@ -7930,14 +7966,13 @@
HInstruction* instr =
new(zone()) HMul(context, value, graph_->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
+ Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
info = TypeInfo::Unknown();
}
- Representation rep = ToRepresentation(info);
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
+ HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -8026,8 +8061,11 @@
: graph_->GetConstantMinus1();
HValue* context = environment()->LookupContext();
HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
- TraceRepresentation(expr->op(), info, instr, rep);
+ // We can't insert a simulate here because it would break deoptimization;
+ // therefore the HAdd must not have side effects, and we freeze its
+ // representation.
instr->AssumeRepresentation(rep);
+ instr->ClearAllSideEffects();
AddInstruction(instr);
return instr;
}
@@ -8101,7 +8139,7 @@
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
@@ -8142,7 +8180,9 @@
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ if (load->HasObservableSideEffects()) {
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ }
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -8170,7 +8210,9 @@
// necessary.
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
} else {
// Keyed property.
@@ -8187,7 +8229,7 @@
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -8205,7 +8247,7 @@
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
}
@@ -8286,16 +8328,22 @@
HValue* left,
HValue* right) {
HValue* context = environment()->LookupContext();
- TypeInfo info = oracle()->BinaryType(expr);
- if (info.IsUninitialized()) {
+ TypeInfo left_info, right_info, result_info, combined_info;
+ oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
+ Representation left_rep = ToRepresentation(left_info);
+ Representation right_rep = ToRepresentation(right_info);
+ Representation result_rep = ToRepresentation(result_info);
+ if (left_info.IsUninitialized()) {
+ // Can't have initialized one but not the other.
+ ASSERT(right_info.IsUninitialized());
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
- info = TypeInfo::Unknown();
+ left_info = right_info = TypeInfo::Unknown();
}
HInstruction* instr = NULL;
switch (expr->op()) {
case Token::ADD:
- if (info.IsString()) {
+ if (left_info.IsString() && right_info.IsString()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
@@ -8323,7 +8371,7 @@
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
- if (info.IsInteger32() &&
+ if (left_info.IsInteger32() && right_info.IsInteger32() &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = new(zone()) HRor(context, operand, shift_amount);
} else {
@@ -8348,23 +8396,11 @@
UNREACHABLE();
}
- // If we hit an uninitialized binary op stub we will get type info
- // for a smi operation. If one of the operands is a constant string
- // do not generate code assuming it is a smi operation.
- if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
- return instr;
+ if (instr->IsBinaryOperation()) {
+ HBinaryOperation* binop = HBinaryOperation::cast(instr);
+ binop->set_observed_input_representation(left_rep, right_rep);
+ binop->initialize_output_representation(result_rep);
}
- Representation rep = ToRepresentation(info);
- // We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation()) {
- HBitwiseBinaryOperation::cast(instr)->
- InitializeObservedInputRepresentation(rep);
- if (rep.IsDouble()) rep = Representation::Integer32();
- }
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
return instr;
}
@@ -8508,27 +8544,8 @@
}
-void HGraphBuilder::TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep) {
- if (!FLAG_trace_representation) return;
- // TODO(svenpanne) Under which circumstances are we actually not flexible?
- // At first glance, this looks a bit weird...
- bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
- PrintF("Operation %s has type info %s, %schange representation assumption "
- "for %s (ID %d) from %s to %s\n",
- Token::Name(op),
- info.ToString(),
- flexible ? "" : " DO NOT ",
- value->Mnemonic(),
- graph_->GetMaximumValueID(),
- value->representation().Mnemonic(),
- rep.Mnemonic());
-}
-
-
Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+ if (info.IsUninitialized()) return Representation::None();
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
if (info.IsDouble()) return Representation::Double();
@@ -8626,13 +8643,17 @@
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo type_info = oracle()->CompareType(expr);
+ TypeInfo left_type, right_type, overall_type_info;
+ oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info);
+ Representation combined_rep = ToRepresentation(overall_type_info);
+ Representation left_rep = ToRepresentation(left_type);
+ Representation right_rep = ToRepresentation(right_type);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
- if (type_info.IsUninitialized()) {
+ if (overall_type_info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
- type_info = TypeInfo::Unknown();
+ overall_type_info = left_type = right_type = TypeInfo::Unknown();
}
CHECK_ALIVE(VisitForValue(expr->left()));
@@ -8704,7 +8725,7 @@
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (type_info.IsNonPrimitive()) {
+ } else if (overall_type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
@@ -8731,8 +8752,7 @@
default:
return Bailout("Unsupported non-primitive compare");
}
- } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
- (op == Token::EQ || op == Token::EQ_STRICT)) {
+ } else if (overall_type_info.IsSymbol() && Token::IsEqualityOp(op)) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
@@ -8742,17 +8762,17 @@
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
- Representation r = ToRepresentation(type_info);
- if (r.IsTagged()) {
+ if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result =
new(zone()) HCompareGeneric(context, left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareIDAndBranch* result =
new(zone()) HCompareIDAndBranch(left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
- result->SetInputRepresentation(r);
return ast_context()->ReturnControl(result, expr->id());
}
}
@@ -8839,7 +8859,9 @@
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ }
}
break;
case Variable::LOOKUP:
@@ -8875,7 +8897,9 @@
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ }
break;
}
case Variable::LOOKUP:
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 3748970..78ab571 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -125,7 +125,10 @@
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
+ void AddSimulate(BailoutId ast_id,
+ RemovableSimulate removable = FIXED_SIMULATE) {
+ AddInstruction(CreateSimulate(ast_id, removable));
+ }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
@@ -166,7 +169,7 @@
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id);
+ HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@@ -255,6 +258,7 @@
void InitializeInferredTypes();
void InsertTypeConversions();
+ void MergeRemovableSimulates();
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
@@ -613,6 +617,25 @@
};
+class HInferRepresentation BASE_EMBEDDED {
+ public:
+ explicit HInferRepresentation(HGraph* graph)
+ : graph_(graph),
+ worklist_(8, graph->zone()),
+ in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
+
+ void Analyze();
+ void AddToWorklist(HValue* current);
+
+ private:
+ Zone* zone() const { return graph_->zone(); }
+
+ HGraph* graph_;
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+};
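
The declaration suggests the usual worklist-to-fixed-point shape: a queue of
values to revisit plus a bit set that prevents double enqueueing (the
BitVector in_worklist_). A generic sketch of that shape, under the assumption
that Analyze() drains the worklist and AddToWorklist() filters duplicates:

    #include <queue>
    #include <vector>

    struct Value { int id; };

    class WorklistPass {
     public:
      explicit WorklistPass(int max_id) : in_worklist_(max_id, false) {}

      void Add(Value* v) {
        if (in_worklist_[v->id]) return;   // already queued
        in_worklist_[v->id] = true;
        worklist_.push(v);
      }

      // update(v, pass) recomputes v's state and may Add() affected users.
      template <typename Update>
      void Run(Update update) {            // iterate to a fixed point
        while (!worklist_.empty()) {
          Value* v = worklist_.front();
          worklist_.pop();
          in_worklist_[v->id] = false;
          update(v, *this);
        }
      }

     private:
      std::queue<Value*> worklist_;
      std::vector<bool> in_worklist_;
    };
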
+
+
class HGraphBuilder;
enum ArgumentsAllowedFlag {
@@ -880,7 +903,8 @@
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id);
+ void AddSimulate(BailoutId ast_id,
+ RemovableSimulate removable = FIXED_SIMULATE);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@@ -1025,10 +1049,6 @@
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
- void TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetUpScope(Scope* scope);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 7ea71e4..42086c8 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -735,6 +735,12 @@
static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch);
+
+ static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register scratch,
+ XMMRegister xmm_scratch);
};
@@ -755,11 +761,20 @@
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, ecx);
+ // If the exponent is above 83, the number contains no significant
+ // bits in the range 0..2^31, so the result is zero.
+ static const uint32_t kResultIsZeroExponent = 83;
+ __ cmp(scratch2, Immediate(kResultIsZeroExponent));
+ __ j(above, &done);
if (use_sse3) {
CpuFeatures::Scope scope(SSE3);
// Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ static const uint32_t kTooBigExponent = 63;
__ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
@@ -771,15 +786,11 @@
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
__ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
} else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ const uint32_t non_smi_exponent = 30;
__ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
@@ -791,8 +802,7 @@
{
// Handle a big exponent. The only reason we have this code is that the
// >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ const uint32_t big_non_smi_exponent = 31;
__ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
@@ -821,19 +831,8 @@
}
__ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done, Label::kNear);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
+ // Exponent word in scratch, exponent in scratch2. Zero in ecx.
+ // We know that 0 <= exponent < 30.
__ mov(ecx, Immediate(30));
__ sub(ecx, scratch2);
@@ -868,8 +867,8 @@
__ jmp(&done, Label::kNear);
__ bind(&negative);
__ sub(ecx, scratch2);
- __ bind(&done);
}
+ __ bind(&done);
}
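
The rework keeps the unbiased exponent in scratch2 from the start (one shr/sub
up front instead of per-branch bias arithmetic) and adds the exponent > 83
early exit: for such values the lowest set bit of the integer part is at
position exponent - 52 >= 32, so the low 32 bits are all zero. A portable
model of the truncation semantics (not of the stub's exact fast-path and
bailout split):

    #include <cstdint>
    #include <cstring>

    // Extract sign, unbiased exponent and mantissa from the raw IEEE-754
    // bits and shift the integer part into the low 32 bits.
    int32_t TruncateDoubleBits(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
      if (exponent < 0) return 0;   // |v| < 1 truncates to zero
      // Low word is all zero for exponents above 83; this also covers
      // infinities and NaNs (exponent field 0x7ff).
      if (exponent > 83) return 0;
      uint64_t mantissa =
          (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      uint32_t result = exponent <= 52
          ? static_cast<uint32_t>(mantissa >> (52 - exponent))
          : static_cast<uint32_t>(mantissa << (exponent - 52));
      if ((bits >> 63) != 0) result = 0u - result;  // negate modulo 2^32
      return static_cast<int32_t>(result);
    }
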
@@ -1192,16 +1191,17 @@
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
__ push(eax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
@@ -1210,7 +1210,7 @@
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
@@ -1220,11 +1220,7 @@
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
// Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
@@ -1233,73 +1229,22 @@
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// 1. Move arguments into edx, eax except for DIV and MOD, which need the
// dividend in eax and edx free for the division. Use eax, ebx for those.
Comment load_comment(masm, "-- Load arguments");
Register left = edx;
Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
left = eax;
right = ebx;
__ mov(ebx, eax);
@@ -1312,7 +1257,7 @@
Label not_smis;
Register combined = ecx;
ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
// Perform the operation into eax and smi check the result. Preserve
// eax in case the result is not a smi.
@@ -1356,7 +1301,7 @@
// eax and check the result if necessary.
Comment perform_smi(masm, "-- Perform smi operation");
Label use_fp_on_smis;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
// Nothing to do.
break;
@@ -1490,7 +1435,7 @@
}
// 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -1513,9 +1458,9 @@
// 6. For some operations emit inline code to perform floating point
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+ if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
__ bind(&use_fp_on_smis);
- switch (op_) {
+ switch (op) {
// Undo the effects of some operations, and some register moves.
case Token::SHL:
// The arguments are saved on the stack, and only used from there.
@@ -1543,8 +1488,8 @@
}
__ jmp(&not_smis);
} else {
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
- switch (op_) {
+ ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
+ switch (op) {
case Token::SHL:
case Token::SHR: {
Comment perform_float(masm, "-- Perform float operation on smis");
@@ -1555,13 +1500,13 @@
// Store the result in the HeapNumber and return.
// It's OK to overwrite the arguments on the stack because we
// are about to return.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ mov(Operand(esp, 1 * kPointerSize), left);
__ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
__ fild_d(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
} else {
- ASSERT_EQ(Token::SHL, op_);
+ ASSERT_EQ(Token::SHL, op);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, left);
@@ -1583,7 +1528,7 @@
Comment perform_float(masm, "-- Perform float operation on smis");
__ bind(&use_fp_on_smis);
// Restore arguments to edx, eax.
- switch (op_) {
+ switch (op) {
case Token::ADD:
// Revert right = right + left.
__ sub(right, left);
@@ -1609,7 +1554,7 @@
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1619,7 +1564,7 @@
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
case Token::MUL: __ fmulp(1); break;
@@ -1642,7 +1587,7 @@
// edx and eax.
Comment done_comment(masm, "-- Enter non-smi code");
__ bind(&not_smis);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
case Token::SHL:
case Token::SAR:
@@ -1689,9 +1634,11 @@
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
} else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
__ bind(&call_runtime);
switch (op_) {
@@ -1716,19 +1663,9 @@
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1756,6 +1693,11 @@
}
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
// Input:
// edx: left operand (tagged)
// eax: right operand (tagged)
@@ -1763,7 +1705,7 @@
// eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
// Floating point case.
switch (op_) {
@@ -1776,6 +1718,18 @@
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_int32);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_int32);
+ }
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
if (op_ == Token::MOD) {
@@ -1798,7 +1752,7 @@
__ test(ecx, Immediate(1));
__ j(zero, &not_int32);
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
}
@@ -1824,7 +1778,8 @@
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -1849,10 +1804,14 @@
Label not_floats;
Label not_int32;
Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. Int32 type feedback
+ // is therefore exactly what yields the right optimized code.
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
&not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+ FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3,
&not_int32);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -1925,44 +1884,24 @@
switch (op_) {
case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- break;
+ return; // Handled above.
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
@@ -2011,7 +1950,28 @@
Label not_floats;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_floats);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_floats);
+ }
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ if (left_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm0, ecx, xmm2);
+ }
+ if (right_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm1, ecx, xmm2);
+ }
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -2020,7 +1980,7 @@
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
@@ -2037,7 +1997,8 @@
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -2063,8 +2024,12 @@
GenerateRegisterArgsPush(masm);
Label not_floats;
Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. Int32 type feedback
+ // is therefore exactly what yields the right optimized code.
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
&not_floats);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2136,46 +2101,23 @@
switch (op_) {
case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
case Token::MOD:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
@@ -2204,7 +2146,8 @@
UNREACHABLE();
}
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
// Floating point case.
switch (op_) {
@@ -2224,7 +2167,7 @@
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
@@ -2241,7 +2184,8 @@
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -2262,8 +2206,9 @@
case Token::SHL:
case Token::SHR: {
Label non_smi_result;
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
&call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2330,48 +2275,26 @@
// result.
__ bind(&call_runtime);
switch (op_) {
- case Token::ADD: {
+ case Token::ADD:
GenerateAddStrings(masm);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
+ // Fall through.
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
@@ -2407,11 +2330,10 @@
}
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
@@ -2923,16 +2845,22 @@
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
- __ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
- __ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm1, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2);
+}
+
+
+void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register scratch,
+ XMMRegister xmm_scratch) {
+ __ cvttsd2si(scratch, Operand(operand));
+ __ cvtsi2sd(xmm_scratch, scratch);
+ __ pcmpeqd(xmm_scratch, operand);
+ __ movmskpd(scratch, xmm_scratch);
+ __ test(scratch, Immediate(1));
+ __ j(zero, non_int32);
}
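
The new helper decides int32-ness by a convert and convert-back round trip,
then compares bit patterns (pcmpeqd/movmskpd) instead of the previous numeric
ucomisd compare; one visible consequence is that -0.0, whose bits differ from
those of +0.0, is now rejected. The same check in portable form:

    #include <cstdint>
    #include <cstring>

    // A double is an int32 exactly when converting to int32 and back
    // reproduces it bit for bit. The range guard filters NaN and huge
    // values; the bitwise compare is what rejects -0.0.
    bool IsInt32Double(double v) {
      if (!(v >= -2147483648.0 && v <= 2147483647.0)) return false;
      int32_t i = static_cast<int32_t>(v);   // in range, so well defined
      double back = static_cast<double>(i);
      uint64_t a, b;
      std::memcpy(&a, &v, sizeof a);
      std::memcpy(&b, &back, sizeof b);
      return a == b;   // bitwise equality, like the pcmpeqd path
    }
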
@@ -3960,9 +3888,9 @@
__ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
__ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
__ jmp(&setup_rest, Label::kNear);
@@ -4108,7 +4036,7 @@
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
__ test_b(ebx, kStringEncodingMask);
@@ -4321,30 +4249,59 @@
return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- Label check_unequal_objects;
-
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected smi operands.");
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
}
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+static void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
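
The single and/cmp pair above checks two facts at once: the not-string bit
must be clear and the symbol bit set. The same test in C++, with illustrative
bit values (the real kIsSymbolMask/kIsNotStringMask/kSymbolTag/kStringTag
constants are defined in V8's headers):

    #include <cstdint>

    constexpr uint32_t kIsNotStringMask = 0x80;  // assumed for illustration
    constexpr uint32_t kStringTag       = 0x00;
    constexpr uint32_t kIsSymbolMask    = 0x40;  // assumed for illustration
    constexpr uint32_t kSymbolTag       = 0x40;

    // Symbol == is-a-string AND symbol bit set, in one mask-and-compare.
    bool IsSymbolInstanceType(uint32_t instance_type) {
      return (instance_type & (kIsSymbolMask | kIsNotStringMask)) ==
             (kSymbolTag | kStringTag);
    }
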
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Label check_unequal_objects;
+ Condition cc = GetCondition();
+
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -4356,67 +4313,61 @@
__ cmp(eax, edx);
__ j(not_equal, &not_identical);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to factory->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ Set(eax, Immediate(0));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, edx);
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
__ ret(0);
} else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
+ Label nan;
+ __ j(above_equal, &nan, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
}
__ bind(&not_identical);
@@ -4424,7 +4375,7 @@
// Strict equality can quickly decide whether objects are equal.
// Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
+ if (cc == equal && strict()) {
Label slow; // Fallthrough label.
Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
@@ -4495,70 +4446,68 @@
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
- __ Set(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
+ __ Set(eax, Immediate(0));
__ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
}
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc_ == equal) {
+ if (cc == equal) {
BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
@@ -4574,7 +4523,7 @@
&check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
eax,
@@ -4593,7 +4542,7 @@
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -4637,11 +4586,11 @@
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
}
// Restore return address on the stack.
@@ -4650,19 +4599,9 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -5407,44 +5346,6 @@
Register InstanceofStub::right() { return edx; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
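The MinorKey removed above illustrates the BitField packing scheme V8 stubs use to cache compiled code under a small integer key. A reduced, self-contained version of the pattern (the field layout below is hypothetical, chosen only to echo the deleted encoder):

#include <cassert>
#include <cstdint>

// Simplified take on V8's BitField: packs a value into bits
// [shift, shift + size) of a 32-bit key.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) >> size) == 0);  // must fit
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Hypothetical layout mirroring the removed CompareStub::MinorKey.
typedef BitField<unsigned, 0, 12> ConditionField;
typedef BitField<bool, 12, 1> StrictField;

uint32_t MakeMinorKey(unsigned cc, bool strict) {
  return ConditionField::encode(cc) | StrictField::encode(strict);
}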
@@ -5679,8 +5580,8 @@
__ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -5697,8 +5598,8 @@
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
__ IncrementCounter(counters->string_add_make_two_char(), 1);
__ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
@@ -5706,7 +5607,7 @@
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
// Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+ __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
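For reference, the shl/or/mov_w sequence above packs both one-byte characters into a single 16-bit store. The same packing in C++, assuming a little-endian target as on ia32 (illustrative helper):

#include <cstdint>
#include <cstring>

// ebx holds the first character, ecx the second; shifting the second left
// by kBitsPerByte (8) and or-ing puts it in the high byte, so the 16-bit
// store places the first character at the lower address on little-endian.
void StoreTwoChars(uint8_t* dest, uint8_t first, uint8_t second) {
  uint16_t packed = static_cast<uint16_t>(first) |
                    (static_cast<uint16_t>(second) << 8);
  std::memcpy(dest, &packed, sizeof(packed));
}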
@@ -5748,13 +5649,6 @@
// edi: second instance type.
__ test(ecx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, ecx);
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
- __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
__ jmp(&allocated);
@@ -5780,10 +5674,10 @@
__ test_b(ecx, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&first_prepared);
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -5801,10 +5695,10 @@
__ test_b(edi, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&second_prepared);
// Push the addresses of both strings' first characters onto the stack.
@@ -5825,7 +5719,7 @@
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load first argument's length and first character location. Account for
// values currently on the stack when fetching arguments from it.
__ mov(edx, Operand(esp, 4 * kPointerSize));
@@ -6135,7 +6029,7 @@
temp, temp, &next_probe_pop_mask[i]);
// Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ and_(temp, 0x0000ffff);
__ cmp(chars, temp);
__ j(equal, &found_in_symbol_table);
@@ -6363,7 +6257,7 @@
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
@@ -6383,12 +6277,12 @@
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ pop(esi);
__ pop(ebx);
__ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6549,9 +6443,9 @@
// doesn't need an additional compare.
__ SmiUntag(length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -6606,7 +6500,7 @@
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
@@ -6632,31 +6526,52 @@
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(edx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(eax, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SS2 or CMOV is unsupported.
+ // stub if NaN is involved or SSE2 or CMOV is unsupported.
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);
- // Load left and right operand
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ // Load left and right operands.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
+ __ SmiUntag(ecx);
+ __ cvtsi2sd(xmm1, ecx);
- // Compare operands
+ __ bind(&left);
+ __ JumpIfSmi(edx, &left_smi, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
+ __ SmiUntag(ecx);
+ __ cvtsi2sd(xmm0, ecx);
+
+ __ bind(&done);
+ // Compare operands.
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
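The new load sequence accepts a Smi or a HeapNumber on either side and normalizes both to doubles before the ucomisd. Restated as a C++ helper (the tagged representation below is a toy stand-in, not V8's real object layout):

#include <cstdint>

// Toy tagged value: ia32 Smis carry the integer in the upper 31 bits with a
// zero low bit; anything else here is treated as a boxed double.
struct Tagged {
  intptr_t raw;
  double heap_number_value;
};

static bool IsSmi(const Tagged& v) { return (v.raw & 1) == 0; }

// Mirrors the left_smi/right_smi paths: SmiUntag + cvtsi2sd for Smis,
// movdbl from the HeapNumber payload otherwise.
double LoadAsDouble(const Tagged& v) {
  if (IsSmi(v)) return static_cast<double>(v.raw >> 1);
  return v.heap_number_value;
}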
@@ -6670,17 +6585,30 @@
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
+ } else {
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
+ __ JumpIfSmi(edx, &unordered);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -6698,7 +6626,7 @@
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -6743,7 +6671,7 @@
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6832,7 +6760,7 @@
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 803a711..29c16e1 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -154,96 +154,6 @@
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_sse3_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | SSE3Bits::encode(use_sse3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 4c79519..5898c31 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -751,7 +751,7 @@
__ movzx_b(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
__ bind(&done);
}
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 159c2ca..c7c6f8a 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -119,7 +119,7 @@
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -2329,7 +2329,7 @@
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -3580,7 +3580,7 @@
__ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
__ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ add(index, Immediate(1));
__ cmp(index, array_length);
@@ -3609,14 +3609,15 @@
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+ kStringRepresentationMask));
__ cmp(scratch, ASCII_STRING_TYPE);
__ j(not_equal, &bailout);
// Add (separator length times array_length) - separator length
// to string_length.
__ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
__ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
@@ -3630,11 +3631,11 @@
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3659,7 +3660,7 @@
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ bind(&loop_1_condition);
@@ -3672,7 +3673,7 @@
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
__ Set(index, Immediate(0));
@@ -3700,7 +3701,7 @@
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -3729,7 +3730,7 @@
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&loop_3_entry);
@@ -3741,7 +3742,7 @@
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -4290,29 +4291,7 @@
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index dae3bbd..ac5af2b 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1715,7 +1715,7 @@
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1726,40 +1726,6 @@
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 6e86d92..3e19c39 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -302,7 +302,30 @@
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -4411,6 +4434,7 @@
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@@ -4431,6 +4455,7 @@
__ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
__ fstp(0);
+ __ RecordComment("Deferred TaggedToI: exponent too big");
DeoptimizeIf(no_condition, instr->environment());
// Reserve space for 64 bit answer.
@@ -4456,6 +4481,7 @@
}
} else {
// Deoptimize if we don't have a heap number.
+ __ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
@@ -4463,13 +4489,16 @@
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
__ ucomisd(xmm0, xmm_temp);
+ __ RecordComment("Deferred TaggedToI: lost precision");
DeoptimizeIf(not_equal, instr->environment());
+ __ RecordComment("Deferred TaggedToI: NaN");
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, &done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
+ __ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
}
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4207410..a07b8d9 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1449,7 +1449,7 @@
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -2289,7 +2289,7 @@
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
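The direction flip above matters when the same assigned index appears more than once: binding in reverse makes the entry at the smaller position the one that sticks, since it is written last. A toy illustration of that overwrite order (hypothetical container, not the Lithium environment API):

#include <map>
#include <utility>
#include <vector>

// Each pair is (assigned index, value). Iterating back-to-front means the
// earliest duplicate in forward order performs the final write.
std::map<int, int> BindReversed(const std::vector<std::pair<int, int> >& values) {
  std::map<int, int> env;
  for (int i = static_cast<int>(values.size()) - 1; i >= 0; --i) {
    env[values[i].first] = values[i].second;
  }
  return env;
}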
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 88b1ef0..8ee3ca0 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -614,7 +614,7 @@
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 26d0f92..41c8667 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -507,7 +507,8 @@
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor) {
+ bool specialize_for_processor,
+ int elements_offset) {
Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -529,12 +530,14 @@
CpuFeatures::Scope use_sse2(SSE2);
movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ movdbl(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
jmp(&done);
@@ -561,13 +564,15 @@
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatures::Scope fscope(SSE2);
cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ movdbl(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
push(scratch1);
fild_s(Operand(esp, 0));
pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
bind(&done);
}
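The new elements_offset threads through the address computation so a caller can hand in a base register that is already biased past the array start and still hit the right slot. Schematically (index here is the untagged element index; the real code scales a Smi key by four, which nets the same times-eight):

#include <cstddef>

// Store address = base + index * 8 + header - offset. A caller whose 'base'
// already includes 'offset' bytes of bias lands on the intended element.
double* DoubleElementSlot(char* biased_base, int index,
                          int header_size, int elements_offset) {
  return reinterpret_cast<double*>(
      biased_base + index * sizeof(double) + header_size - elements_offset);
}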
@@ -1453,14 +1458,14 @@
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ AllocateInNewSpace(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
result,
@@ -1488,7 +1493,7 @@
ASSERT(length > 0);
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::SizeFor(length),
+ AllocateInNewSpace(SeqOneByteString::SizeFor(length),
result,
scratch1,
scratch2,
@@ -2638,15 +2643,17 @@
movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
// Check that both are flat ASCII strings.
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringMask = kIsNotStringMask | kStringRepresentationMask
+ | kStringEncodingMask | kAsciiDataHintTag;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
// Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch1, kFlatAsciiStringMask);
and_(scratch2, kFlatAsciiStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ shl(scratch1, 8);
+ or_(scratch1, scratch2);
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 8));
j(not_equal, failure);
}
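The rewritten check widens the interleave from three bits to a full byte: mask both instance types, shift one into the high byte, and verify both in a single cmp. In C++ terms (illustrative helper):

#include <cstdint>

// Combine the two masked instance-type bytes so one comparison checks that
// both strings are flat ASCII, as the shl/or/cmp sequence above does.
bool BothFlatAscii(uint8_t type1, uint8_t type2,
                   uint8_t mask, uint8_t expected) {
  uint32_t combined = (static_cast<uint32_t>(type1 & mask) << 8) |
                      static_cast<uint32_t>(type2 & mask);
  return combined == ((static_cast<uint32_t>(expected) << 8) | expected);
}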
@@ -2913,8 +2920,8 @@
// Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
- ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqAsciiString::kMaxSize <=
+ ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqOneByteString::kMaxSize <=
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e48d0e7..3c74f19 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -388,7 +388,8 @@
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor);
+ bool specialize_for_processor,
+ int offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 127d16a..28a3994 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1523,7 +1523,7 @@
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1531,7 +1531,7 @@
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1562,17 +1562,49 @@
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(factory()->fixed_double_array_map()));
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into eax and calculate new length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(eax, Immediate(Smi::FromInt(argc)));
+
+ // Get the elements' length into ecx.
+ __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(eax, ecx);
+ __ j(greater, &call_builtin);
+
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+ __ ret((argc + 1) * kPointerSize);
+
__ bind(&with_write_barrier);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(factory()->heap_number_map()));
+ __ j(equal, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
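The double-elements branch added above is the usual grow-in-place push: bail to the builtin unless the new length fits the existing capacity, then store and bump the length. The same guard in C++ (sketch, not the stub's calling convention):

#include <cstddef>

// Mirrors the cmp(eax, ecx) / j(greater, &call_builtin) guard: push
// succeeds only when spare capacity already exists.
bool TryFastPush(double* elements, int* length, int capacity, double value) {
  if (*length + 1 > capacity) return false;  // let the builtin reallocate
  elements[(*length)++] = value;
  return true;
}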
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 49b6ef9..77f409a 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -43,7 +43,8 @@
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = Isolate::Current()->debug();
+ ASSERT(Isolate::Current() == isolate());
+ Debug* debug = isolate()->debug();
// First check if any break points are active if not just return the address
// of the call.
if (!debug->has_break_points()) return result;
diff --git a/src/ic.cc b/src/ic.cc
index 1418f02..5212004 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -310,7 +310,8 @@
if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
return;
}
- Code* host = target->GetHeap()->isolate()->
+ Isolate* isolate = target->GetHeap()->isolate();
+ Code* host = isolate->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
if (host->kind() != Code::FUNCTION) return;
@@ -333,7 +334,7 @@
}
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
- Isolate::Current()->runtime_profiler()->NotifyICChanged();
+ isolate->runtime_profiler()->NotifyICChanged();
}
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -414,11 +415,13 @@
void CompareIC::Clear(Address address, Code* target) {
- // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
- if (target->major_key() != CodeStub::CompareIC) return;
+ ASSERT(target->major_key() == CodeStub::CompareIC);
+ CompareIC::State handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
+ &handler_state, &op);
// Only clear CompareICs that can retain objects.
- if (target->compare_state() != KNOWN_OBJECTS) return;
- Token::Value op = CompareIC::ComputeOperation(target);
+ if (handler_state != KNOWN_OBJECTS) return;
SetTargetAtAddress(address, GetRawUninitialized(op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
@@ -2313,11 +2316,10 @@
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
case SMI: return "SMI";
- case INT32: return "Int32s";
- case HEAP_NUMBER: return "HeapNumbers";
+ case INT32: return "Int32";
+ case HEAP_NUMBER: return "HeapNumber";
case ODDBALL: return "Oddball";
- case BOTH_STRING: return "BothStrings";
- case STRING: return "Strings";
+ case STRING: return "String";
case GENERIC: return "Generic";
default: return "Invalid";
}
@@ -2332,7 +2334,6 @@
case INT32:
case HEAP_NUMBER:
case ODDBALL:
- case BOTH_STRING:
case STRING:
return MONOMORPHIC;
case GENERIC:
@@ -2343,58 +2344,6 @@
}
-BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
- BinaryOpIC::TypeInfo y) {
- if (x == UNINITIALIZED) return y;
- if (y == UNINITIALIZED) return x;
- if (x == y) return x;
- if (x == BOTH_STRING && y == STRING) return STRING;
- if (x == STRING && y == BOTH_STRING) return STRING;
- if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
- return GENERIC;
- }
- if (x > y) return x;
- return y;
-}
-
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
- Handle<Object> right) {
- ::v8::internal::TypeInfo left_type =
- ::v8::internal::TypeInfo::TypeFromValue(left);
- ::v8::internal::TypeInfo right_type =
- ::v8::internal::TypeInfo::TypeFromValue(right);
-
- if (left_type.IsSmi() && right_type.IsSmi()) {
- return SMI;
- }
-
- if (left_type.IsInteger32() && right_type.IsInteger32()) {
- // Platforms with 32-bit Smis have no distinct INT32 type.
- if (kSmiValueSize == 32) return SMI;
- return INT32;
- }
-
- if (left_type.IsNumber() && right_type.IsNumber()) {
- return HEAP_NUMBER;
- }
-
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- if (left_type.IsString()) {
- return right_type.IsString() ? BOTH_STRING : STRING;
- } else if (right_type.IsString()) {
- return STRING;
- }
-
- // Check for oddball objects.
- if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
- if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
-
- return GENERIC;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ASSERT(args.length() == 4);
@@ -2446,25 +2395,72 @@
return *result;
}
+
+static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
+ Token::Value op) {
+ ::v8::internal::TypeInfo type =
+ ::v8::internal::TypeInfo::TypeFromValue(value);
+ if (type.IsSmi()) return BinaryOpIC::SMI;
+ if (type.IsInteger32()) {
+ if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ if (type.IsNumber()) return BinaryOpIC::HEAP_NUMBER;
+ if (type.IsString()) return BinaryOpIC::STRING;
+ if (value->IsUndefined()) {
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ return BinaryOpIC::ODDBALL;
+ }
+ return BinaryOpIC::GENERIC;
+}
+
+
+static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
+ Handle<Object> value,
+ Token::Value op) {
+ BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
+ if (old_type == BinaryOpIC::STRING) {
+ if (new_type == BinaryOpIC::STRING) return new_type;
+ return BinaryOpIC::GENERIC;
+ }
+ return Max(old_type, new_type);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 5);
+ ASSERT(args.length() == 3);
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
int key = args.smi_at(2);
- Token::Value op = static_cast<Token::Value>(args.smi_at(3));
- BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
+ Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
+ BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result;
+ BinaryOpStub::decode_types_from_minor_key(
+ key, &previous_left, &previous_right, &unused_previous_result);
- BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
- type = BinaryOpIC::JoinTypes(type, previous_type);
+ BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
+ BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
- if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
+
+ // STRING is only used for ADD operations.
+ if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
op != Token::ADD) {
- type = BinaryOpIC::GENERIC;
+ new_left = new_right = BinaryOpIC::GENERIC;
}
- if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
+
+ BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
+ BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
+
+ if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
@@ -2479,26 +2475,35 @@
result_type = BinaryOpIC::INT32;
}
}
- if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
- // We must be here because an operation on two INT32 types overflowed.
- result_type = BinaryOpIC::HEAP_NUMBER;
+ if (new_overall == BinaryOpIC::INT32 &&
+ previous_overall == BinaryOpIC::INT32) {
+ if (new_left == previous_left && new_right == previous_right) {
+ result_type = BinaryOpIC::HEAP_NUMBER;
+ }
}
- BinaryOpStub stub(key, type, result_type);
+ BinaryOpStub stub(key, new_left, new_right, result_type);
Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
+#ifdef DEBUG
if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
- BinaryOpIC::GetName(previous_type),
- BinaryOpIC::GetName(type),
+ PrintF("[BinaryOpIC in ");
+ JavaScriptFrame::PrintTop(stdout, false, true);
+ PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n",
+ BinaryOpIC::GetName(previous_left),
+ BinaryOpIC::GetName(previous_right),
+ BinaryOpIC::GetName(new_left),
+ BinaryOpIC::GetName(new_right),
BinaryOpIC::GetName(result_type),
- Token::Name(op));
+ Token::Name(op),
+ static_cast<void*>(*code));
}
+#endif
BinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
- if (previous_type == BinaryOpIC::UNINITIALIZED) {
+ if (previous_overall == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
}
}
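InputState above is a join on a small lattice, ordered as the TypeInfo enum (SMI < INT32 < HEAP_NUMBER < ODDBALL < STRING < GENERIC), with one twist: STRING joined with anything that is not STRING collapses straight to GENERIC rather than taking the maximum. Restated compactly:

#include <algorithm>

enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, ODDBALL, STRING, GENERIC };

// Same decision as InputState once the new observation has been classified.
TypeInfo Join(TypeInfo old_type, TypeInfo new_type) {
  if (old_type == STRING) {
    return new_type == STRING ? STRING : GENERIC;
  }
  return std::max(old_type, new_type);
}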
@@ -2561,7 +2566,7 @@
Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code));
return code;
@@ -2569,35 +2574,20 @@
Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
return stub.GetCode();
}
-CompareIC::State CompareIC::ComputeState(Code* target) {
- int key = target->major_key();
- if (key == CodeStub::Compare) return GENERIC;
- ASSERT(key == CodeStub::CompareIC);
- return static_cast<State>(target->compare_state());
-}
-
-
-Token::Value CompareIC::ComputeOperation(Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(
- target->compare_operation() + Token::EQ);
-}
-
-
const char* CompareIC::GetStateName(State state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
- case SMIS: return "SMIS";
- case HEAP_NUMBERS: return "HEAP_NUMBERS";
- case OBJECTS: return "OBJECTS";
+ case SMI: return "SMI";
+ case HEAP_NUMBER: return "HEAP_NUMBER";
+ case OBJECT: return "OBJECT";
case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
- case SYMBOLS: return "SYMBOLS";
- case STRINGS: return "STRINGS";
+ case SYMBOL: return "SYMBOL";
+ case STRING: return "STRING";
case GENERIC: return "GENERIC";
default:
UNREACHABLE();
@@ -2606,28 +2596,67 @@
}
-CompareIC::State CompareIC::TargetState(State state,
+static CompareIC::State InputState(CompareIC::State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case CompareIC::UNINITIALIZED:
+ if (value->IsSmi()) return CompareIC::SMI;
+ if (value->IsHeapNumber()) return CompareIC::HEAP_NUMBER;
+ if (value->IsSymbol()) return CompareIC::SYMBOL;
+ if (value->IsString()) return CompareIC::STRING;
+ if (value->IsJSObject()) return CompareIC::OBJECT;
+ break;
+ case CompareIC::SMI:
+ if (value->IsSmi()) return CompareIC::SMI;
+ if (value->IsHeapNumber()) return CompareIC::HEAP_NUMBER;
+ break;
+ case CompareIC::HEAP_NUMBER:
+ if (value->IsNumber()) return CompareIC::HEAP_NUMBER;
+ break;
+ case CompareIC::SYMBOL:
+ if (value->IsSymbol()) return CompareIC::SYMBOL;
+ if (value->IsString()) return CompareIC::STRING;
+ break;
+ case CompareIC::STRING:
+ if (value->IsSymbol() || value->IsString()) return CompareIC::STRING;
+ break;
+ case CompareIC::OBJECT:
+ if (value->IsJSObject()) return CompareIC::OBJECT;
+ break;
+ case CompareIC::GENERIC:
+ break;
+ case CompareIC::KNOWN_OBJECTS:
+ UNREACHABLE();
+ break;
+ }
+ return CompareIC::GENERIC;
+}
+
+
+CompareIC::State CompareIC::TargetState(State old_state,
+ State old_left,
+ State old_right,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- switch (state) {
+ switch (old_state) {
case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMIS;
- if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+ if (x->IsSmi() && y->IsSmi()) return SMI;
+ if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBER;
if (Token::IsOrderedRelationalCompareOp(op_)) {
// Ordered comparisons treat undefined as NaN, so the
// HEAP_NUMBER stub will do the right thing.
if ((x->IsNumber() && y->IsUndefined()) ||
(y->IsNumber() && x->IsUndefined())) {
- return HEAP_NUMBERS;
+ return HEAP_NUMBER;
}
}
if (x->IsSymbol() && y->IsSymbol()) {
// We compare symbols as strings if we need to determine
// the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
+ return Token::IsEqualityOp(op_) ? SYMBOL : STRING;
}
- if (x->IsString() && y->IsString()) return STRINGS;
+ if (x->IsString() && y->IsString()) return STRING;
if (!Token::IsEqualityOp(op_)) return GENERIC;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
@@ -2635,30 +2664,70 @@
Token::IsEqualityOp(op_)) {
return KNOWN_OBJECTS;
} else {
- return OBJECTS;
+ return OBJECT;
}
}
return GENERIC;
- case SMIS:
- return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
- ? HEAP_NUMBERS
+ case SMI:
+ return x->IsNumber() && y->IsNumber()
+ ? HEAP_NUMBER
: GENERIC;
- case SYMBOLS:
+ case SYMBOL:
ASSERT(Token::IsEqualityOp(op_));
- return x->IsString() && y->IsString() ? STRINGS : GENERIC;
- case HEAP_NUMBERS:
- case STRINGS:
- case OBJECTS:
+ return x->IsString() && y->IsString() ? STRING : GENERIC;
+ case HEAP_NUMBER:
+ if (old_left == SMI && x->IsHeapNumber()) return HEAP_NUMBER;
+ if (old_right == SMI && y->IsHeapNumber()) return HEAP_NUMBER;
+ case STRING:
+ case OBJECT:
case KNOWN_OBJECTS:
case GENERIC:
return GENERIC;
}
UNREACHABLE();
- return GENERIC;
+ return GENERIC; // Make the compiler happy.
}
-// Used from ic_<arch>.cc.
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ State previous_left, previous_right, previous_state;
+ ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
+ &previous_right, &previous_state, NULL);
+ State new_left = InputState(previous_left, x);
+ State new_right = InputState(previous_right, y);
+ State state = TargetState(previous_state, previous_left, previous_right,
+ HasInlinedSmiCode(address()), x, y);
+ ICCompareStub stub(op_, new_left, new_right, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ set_target(*stub.GetCode());
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC in ");
+ JavaScriptFrame::PrintTop(stdout, false, true);
+ PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
+ GetStateName(previous_left),
+ GetStateName(previous_right),
+ GetStateName(previous_state),
+ GetStateName(new_left),
+ GetStateName(new_right),
+ GetStateName(state),
+ Token::Name(op_),
+ static_cast<void*>(*stub.GetCode()));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
+}
+
+
+// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
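Taken together, the per-operand tracking means a compare site that first sees two Smis and later a Smi against a HeapNumber steps SMI -> HEAP_NUMBER instead of jumping to GENERIC. A condensed stand-alone model of the numeric slice of InputState (the enumerators mirror the patch; everything else is simplified):

enum State { UNINITIALIZED, SMI, HEAP_NUMBER, GENERIC };
enum Kind { kSmiValue, kHeapNumberValue, kOtherValue };

// Numeric slice of CompareIC::InputState: Smis may widen to HEAP_NUMBER,
// HEAP_NUMBER absorbs any number, and anything else degrades to GENERIC.
State NextInputState(State old_state, Kind seen) {
  switch (old_state) {
    case UNINITIALIZED:
    case SMI:
      if (seen == kSmiValue) return SMI;
      if (seen == kHeapNumberValue) return HEAP_NUMBER;
      break;
    case HEAP_NUMBER:
      if (seen != kOtherValue) return HEAP_NUMBER;  // any number stays here
      break;
    default:
      break;
  }
  return GENERIC;
}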
diff --git a/src/ic.h b/src/ic.h
index 8767f98..bfccd10 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -771,8 +771,7 @@
INT32,
HEAP_NUMBER,
ODDBALL,
- BOTH_STRING, // Only used for addition operation.
- STRING, // Only used for addition operation. At least one string operand.
+ STRING, // Only used for addition operation.
GENERIC
};
@@ -783,10 +782,6 @@
static const char* GetName(TypeInfo type_info);
static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
-
- static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
};
@@ -794,11 +789,11 @@
public:
enum State {
UNINITIALIZED,
- SMIS,
- HEAP_NUMBERS,
- SYMBOLS,
- STRINGS,
- OBJECTS,
+ SMI,
+ HEAP_NUMBER,
+ SYMBOL,
+ STRING,
+ OBJECT,
KNOWN_OBJECTS,
GENERIC
};
@@ -809,27 +804,27 @@
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
+
// Factory method for getting an uninitialized compare stub.
static Handle<Code> GetUninitialized(Token::Value op);
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
- // Helper function for determining the state of a compare IC.
- static State ComputeState(Code* target);
-
- // Helper function for determining the operation a compare IC is for.
- static Token::Value ComputeOperation(Code* target);
-
static const char* GetStateName(State state);
private:
- State TargetState(State state, bool has_inlined_smi_code,
- Handle<Object> x, Handle<Object> y);
+ static bool HasInlinedSmiCode(Address address);
+
+ State TargetState(State old_state,
+ State old_left,
+ State old_right,
+ bool has_inlined_smi_code,
+ Handle<Object> x,
+ Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- State GetState() { return ComputeState(target()); }
static Code* GetRawUninitialized(Token::Value op);
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index b34d6d9..e9fd74c 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -188,16 +188,78 @@
}
+static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
+ MarkBit mark_bit,
+ int size) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (mark_bit.Get()) return;
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ ASSERT(Marking::IsBlack(mark_bit));
+}
+
+
+static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
+ MarkBit mark_bit,
+ int size) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (Marking::IsBlack(mark_bit)) return;
+ Marking::MarkBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ ASSERT(Marking::IsBlack(mark_bit));
+}
+
+
class IncrementalMarkingMarkingVisitor
: public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
static void Initialize() {
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
-
+ table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
+ static const int kProgressBarScanningChunk = 32 * 1024;
+
+ static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ // TODO(mstarzinger): Move setting of the flag to the allocation site of
+ // the array. The visitor should just check the flag.
+ if (FLAG_use_marking_progress_bar &&
+ chunk->owner()->identity() == LO_SPACE) {
+ chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ Heap* heap = map->GetHeap();
+ // When using a progress bar for large fixed arrays, scan only a chunk of
+ // the array and try to push it onto the marking deque again until it is
+ // fully scanned. Fall back to scanning it through to the end in case this
+ // fails because of a full deque.
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
+ chunk->progress_bar());
+ int end_offset = Min(object_size,
+ start_offset + kProgressBarScanningChunk);
+ bool scan_until_end = false;
+ do {
+ VisitPointersWithAnchor(heap,
+ HeapObject::RawField(object, 0),
+ HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+ scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+ } while (scan_until_end && start_offset < object_size);
+ chunk->set_progress_bar(start_offset);
+ if (start_offset < object_size) {
+ heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+ }
+ } else {
+ FixedArrayVisitor::Visit(map, object);
+ }
+ }
+
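+ The loop in VisitFixedArrayIncremental is the heart of the progress bar: scan at most 32KB, advance the recorded offset, and re-queue the object as grey, except when the deque is full, in which case keep scanning through to the end. Abstracted from the heap machinery (the scan callback and its deque-full return value are illustrative):

#include <algorithm>

static const int kChunk = 32 * 1024;

// Returns the new progress offset; a result smaller than 'size' means the
// object must be pushed back onto the marking deque and revisited.
int ScanWithProgressBar(int progress, int size, bool (*scan)(int, int)) {
  int start = std::max(0, progress);
  int end = std::min(size, start + kChunk);
  bool deque_full = false;
  do {
    deque_full = scan(start, end);  // true forces scanning to the end
    start = end;
    end = std::min(size, end + kChunk);
  } while (deque_full && start < size);
  return start;
}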
static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
Context* context = Context::cast(object);
@@ -234,15 +296,25 @@
}
}
+ INLINE(static void VisitPointersWithAnchor(Heap* heap,
+ Object** anchor,
+ Object** start,
+ Object** end)) {
+ for (Object** p = start; p < end; p++) {
+ Object* obj = *p;
+ if (obj->NonFailureIsHeapObject()) {
+ heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
+ MarkObject(heap, obj);
+ }
+ }
+ }
+
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- }
+ MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
} else if (Marking::IsWhite(mark_bit)) {
heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
}
@@ -288,10 +360,7 @@
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- }
+ MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
} else {
if (Marking::IsWhite(mark_bit)) {
incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
@@ -616,8 +685,11 @@
ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
#endif
}
}
@@ -637,11 +709,15 @@
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
- MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
- SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
- Marking::MarkBlack(obj_mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
+#endif
+ MarkBlackOrKeepBlack(obj, mark_bit, size);
}
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 6ae0f59..77d54be 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -164,19 +164,6 @@
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
- // Does white->black or keeps gray or black color. Returns true if converting
- // white to black.
- inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (mark_bit.Get()) {
- // Grey or black: Keep the color.
- return false;
- }
- mark_bit.Set();
- ASSERT(Marking::IsBlack(mark_bit));
- return true;
- }
-
inline int steps_count() {
return steps_count_;
}
diff --git a/src/json-parser.h b/src/json-parser.h
index 6f8c715..2f980cc 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -58,7 +58,7 @@
if (position_ >= source_length_) {
c0_ = kEndOfString;
} else if (seq_ascii) {
- c0_ = seq_source_->SeqAsciiStringGet(position_);
+ c0_ = seq_source_->SeqOneByteStringGet(position_);
} else {
c0_ = source_->Get(position_);
}
@@ -160,7 +160,7 @@
private:
Handle<String> source_;
int source_length_;
- Handle<SeqAsciiString> seq_source_;
+ Handle<SeqOneByteString> seq_source_;
PretenureFlag pretenure_;
Isolate* isolate_;
@@ -186,7 +186,7 @@
// Optimized fast case where we only have ASCII characters.
if (seq_ascii) {
- seq_source_ = Handle<SeqAsciiString>::cast(source_);
+ seq_source_ = Handle<SeqOneByteString>::cast(source_);
}
// Set initial position right before the string.
@@ -472,8 +472,8 @@
}
template <>
-inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
- seq_str->SeqAsciiStringSet(i, c);
+inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqOneByteStringSet(i, c);
}
template <typename StringType>
@@ -489,10 +489,10 @@
}
template <>
-inline Handle<SeqAsciiString> NewRawString(Factory* factory,
+inline Handle<SeqOneByteString> NewRawString(Factory* factory,
int length,
PretenureFlag pretenure) {
- return factory->NewRawAsciiString(length, pretenure);
+ return factory->NewRawOneByteString(length, pretenure);
}
@@ -530,7 +530,7 @@
SeqStringSet(seq_str, count++, c0_);
Advance();
} else {
- // StringType is SeqAsciiString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-ASCII char.
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
}
} else {
@@ -570,7 +570,7 @@
SeqStringSet(seq_str, count++, value);
break;
} else {
- // StringType is SeqAsciiString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-ASCII char.
position_ -= 6; // Rewind position_ to \ in \uxxxx.
Advance();
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
@@ -626,7 +626,7 @@
c0_ = c0;
int beg_pos = position_;
position_ = position;
- return SlowScanJsonString<SeqAsciiString, char>(source_,
+ return SlowScanJsonString<SeqOneByteString, char>(source_,
beg_pos,
position_);
}
@@ -634,7 +634,7 @@
running_hash = StringHasher::AddCharacterCore(running_hash, c0);
position++;
if (position >= source_length_) return Handle<String>::null();
- c0 = seq_source_->SeqAsciiStringGet(position);
+ c0 = seq_source_->SeqOneByteStringGet(position);
} while (c0 != '"');
int length = position - position_;
uint32_t hash = (length <= String::kMaxHashCalcLength)
@@ -677,7 +677,7 @@
position_);
}
} else {
- return SlowScanJsonString<SeqAsciiString, char>(source_,
+ return SlowScanJsonString<SeqOneByteString, char>(source_,
beg_pos,
position_);
}
@@ -689,8 +689,8 @@
beg_pos,
length);
} else {
- result = factory()->NewRawAsciiString(length, pretenure_);
- char* dest = SeqAsciiString::cast(*result)->GetChars();
+ result = factory()->NewRawOneByteString(length, pretenure_);
+ char* dest = SeqOneByteString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
}
ASSERT_EQ('"', c0_);
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 68dc82a..3866343 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -219,7 +219,7 @@
accumulator_store_ = Handle<JSValue>::cast(
factory_->ToObject(factory_->empty_string()));
part_length_ = kInitialPartLength;
- current_part_ = factory_->NewRawAsciiString(kInitialPartLength);
+ current_part_ = factory_->NewRawOneByteString(kInitialPartLength);
tojson_symbol_ = factory_->LookupAsciiSymbol("toJSON");
stack_ = factory_->NewJSArray(8);
}
@@ -246,7 +246,7 @@
template <bool is_ascii, typename Char>
void BasicJsonStringifier::Append_(Char c) {
if (is_ascii) {
- SeqAsciiString::cast(*current_part_)->SeqAsciiStringSet(
+ SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
current_index_++, c);
} else {
SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
@@ -636,8 +636,8 @@
int string_size, allocated_string_size;
if (is_ascii_) {
- allocated_string_size = SeqAsciiString::SizeFor(part_length_);
- string_size = SeqAsciiString::SizeFor(current_index_);
+ allocated_string_size = SeqOneByteString::SizeFor(part_length_);
+ string_size = SeqOneByteString::SizeFor(current_index_);
} else {
allocated_string_size = SeqTwoByteString::SizeFor(part_length_);
string_size = SeqTwoByteString::SizeFor(current_index_);
@@ -663,7 +663,7 @@
part_length_ *= kPartLengthGrowthFactor;
}
if (is_ascii_) {
- current_part_ = factory_->NewRawAsciiString(part_length_);
+ current_part_ = factory_->NewRawOneByteString(part_length_);
} else {
current_part_ = factory_->NewRawTwoByteString(part_length_);
}
@@ -719,7 +719,7 @@
if (is_ascii) {
SerializeStringUnchecked_(
vector.start(),
- SeqAsciiString::cast(*current_part_)->GetChars(),
+ SeqOneByteString::cast(*current_part_)->GetChars(),
length);
} else {
SerializeStringUnchecked_(
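
The NewRawOneByteString(part_length_) calls above feed the stringifier's buffering scheme: output is written into a fixed-size "part", and when a part fills up the next one is allocated kPartLengthGrowthFactor times larger, keeping total copying linear. A sketch of the idea with illustrative constants (V8's actual kInitialPartLength and growth factor may differ) and std::string standing in for the factory-allocated parts:

    #include <cstddef>
    #include <string>

    // Serialize into fixed-size "parts"; a full part is flushed and the next
    // one is allocated geometrically larger, so total copying stays linear.
    class PartBuffer {
     public:
      void Append(char c) {
        if (current_.size() == part_length_) Extend();
        current_.push_back(c);
      }
      std::string TakeResult() { return accumulator_ + current_; }

     private:
      void Extend() {
        accumulator_ += current_;       // flush the full part
        current_.clear();
        part_length_ *= kGrowthFactor;  // next part is larger
      }
      static const std::size_t kInitialPartLength = 32;  // illustrative
      static const std::size_t kGrowthFactor = 2;        // illustrative
      std::size_t part_length_ = kInitialPartLength;
      std::string current_;
      std::string accumulator_;
    };
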
diff --git a/src/liveedit.cc b/src/liveedit.cc
index dc7d4b1..574a376 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -703,12 +703,14 @@
: JSArrayBasedStruct<FunctionInfoWrapper>(array) {
}
void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num, int parent_index) {
+ int end_position, int param_num,
+ int literal_count, int parent_index) {
HandleScope scope;
this->SetField(kFunctionNameOffset_, name);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
this->SetSmiValueField(kParamNumOffset_, param_num);
+ this->SetSmiValueField(kLiteralNumOffset_, literal_count);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
void SetFunctionCode(Handle<Code> function_code,
@@ -726,6 +728,9 @@
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
+ int GetLiteralCount() {
+ return this->GetSmiValueField(kLiteralNumOffset_);
+ }
int GetParentIndex() {
return this->GetSmiValueField(kParentIndexOffset_);
}
@@ -759,7 +764,8 @@
static const int kOuterScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
- static const int kSize_ = 9;
+ static const int kLiteralNumOffset_ = 9;
+ static const int kSize_ = 10;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
@@ -819,6 +825,7 @@
FunctionInfoWrapper info = FunctionInfoWrapper::Create();
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
+ fun->materialized_literal_count(),
current_parent_index_);
current_parent_index_ = len_;
SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -1014,6 +1021,129 @@
}
+// Patch function literals.
+// The name 'literals' is a misnomer: the array is really a cache for complex
+// object boilerplates and for the native context. We must clear the cached
+// values. Additionally, we may need to allocate a new array if the number of
+// literals changed.
+class LiteralFixer {
+ public:
+ static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) {
+ int new_literal_count = compile_info_wrapper->GetLiteralCount();
+ if (new_literal_count > 0) {
+ new_literal_count += JSFunction::kLiteralsPrefixSize;
+ }
+ int old_literal_count = shared_info->num_literals();
+
+ if (old_literal_count == new_literal_count) {
+ // If literal count didn't change, simply go over all functions
+ // and clear literal arrays.
+ ClearValuesVisitor visitor;
+ IterateJSFunctions(*shared_info, &visitor);
+ } else {
+ // When literal count changes, we have to create new array instances.
+ // Since we cannot create instances when iterating heap, we should first
+ // collect all functions and fix their literal arrays.
+ Handle<FixedArray> function_instances =
+ CollectJSFunctions(shared_info, isolate);
+ for (int i = 0; i < function_instances->length(); i++) {
+ Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+ Handle<FixedArray> old_literals(fun->literals());
+ Handle<FixedArray> new_literals =
+ isolate->factory()->NewFixedArray(new_literal_count);
+ if (new_literal_count > 0) {
+ Handle<Context> native_context;
+ if (old_literals->length() >
+ JSFunction::kLiteralNativeContextIndex) {
+ native_context = Handle<Context>(
+ JSFunction::NativeContextFromLiterals(fun->literals()));
+ } else {
+ native_context = Handle<Context>(fun->context()->native_context());
+ }
+ new_literals->set(JSFunction::kLiteralNativeContextIndex,
+ *native_context);
+ }
+ fun->set_literals(*new_literals);
+ }
+
+ shared_info->set_num_literals(new_literal_count);
+ }
+ }
+
+ private:
+  // Iterates over all function instances in the heap that refer to the
+  // provided shared_info.
+ template<typename Visitor>
+ static void IterateJSFunctions(SharedFunctionInfo* shared_info,
+ Visitor* visitor) {
+ AssertNoAllocation no_allocations_please;
+
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL;
+ obj = iterator.next()) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ if (function->shared() == shared_info) {
+ visitor->visit(function);
+ }
+ }
+ }
+ }
+
+  // Finds all instances of JSFunction that refer to the provided shared_info
+  // and returns an array of them.
+ static Handle<FixedArray> CollectJSFunctions(
+ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
+ CountVisitor count_visitor;
+ count_visitor.count = 0;
+ IterateJSFunctions(*shared_info, &count_visitor);
+ int size = count_visitor.count;
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
+ if (size > 0) {
+ CollectVisitor collect_visitor(result);
+ IterateJSFunctions(*shared_info, &collect_visitor);
+ }
+ return result;
+ }
+
+ class ClearValuesVisitor {
+ public:
+ void visit(JSFunction* fun) {
+ FixedArray* literals = fun->literals();
+ int len = literals->length();
+ for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) {
+ literals->set_undefined(j);
+ }
+ }
+ };
+
+ class CountVisitor {
+ public:
+ void visit(JSFunction* fun) {
+ count++;
+ }
+ int count;
+ };
+
+ class CollectVisitor {
+ public:
+ explicit CollectVisitor(Handle<FixedArray> output)
+ : m_output(output), m_pos(0) {}
+
+ void visit(JSFunction* fun) {
+ m_output->set(m_pos, fun);
+ m_pos++;
+ }
+ private:
+ Handle<FixedArray> m_output;
+ int m_pos;
+ };
+};
+
+
// Check whether the code is natural function code (not a lazy-compile stub
// code).
static bool IsJSFunctionCode(Code* code) {
@@ -1080,9 +1210,10 @@
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
HandleScope scope;
+ Isolate* isolate = Isolate::Current();
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
@@ -1113,6 +1244,8 @@
shared_info->set_start_position(start_position);
shared_info->set_end_position(end_position);
+ LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
+
shared_info->set_construct_stub(
Isolate::Current()->builtins()->builtin(
Builtins::kJSConstructStubGeneric));
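
A note on the CountVisitor/CollectVisitor pair in LiteralFixer above: a HeapIterator must not trigger allocation, so the code walks the heap twice, first counting matches so the result array can be allocated exactly once, then filling it. The same two-pass shape, sketched over an ordinary container (illustrative, not V8 API):

    #include <cstddef>
    #include <vector>

    // Two passes over the "heap": count first so the result is allocated
    // exactly once, then collect, since no allocation may happen mid-walk.
    template <typename T, typename Pred>
    std::vector<T> CollectMatching(const std::vector<T>& heap, Pred matches) {
      std::size_t count = 0;          // pass 1: CountVisitor
      for (const T& obj : heap)
        if (matches(obj)) ++count;
      std::vector<T> result;
      result.reserve(count);          // one right-sized allocation
      for (const T& obj : heap)       // pass 2: CollectVisitor
        if (matches(obj)) result.push_back(obj);
      return result;
    }
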
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 6b89cf6..6dbe0a8 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -71,7 +71,7 @@
v(ExternalAsciiString, "unexpected: ExternalAsciiString") \
v(ExternalString, "unexpected: ExternalString") \
v(SeqTwoByteString, "unexpected: SeqTwoByteString") \
- v(SeqAsciiString, "unexpected: SeqAsciiString") \
+ v(SeqOneByteString, "unexpected: SeqOneByteString") \
v(SeqString, "unexpected: SeqString") \
v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
v(NativeContext, "unexpected: NativeContext") \
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1af3074..30abe6d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -488,6 +488,7 @@
MarkBit mark_bit = Marking::MarkBitFrom(obj);
mark_bit.Clear();
mark_bit.Next().Clear();
+ Page::FromAddress(obj->address())->ResetProgressBar();
Page::FromAddress(obj->address())->ResetLiveBytes();
}
}
diff --git a/src/messages.js b/src/messages.js
index 3d6c8ce..0a50ae7 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -252,13 +252,8 @@
function MakeGenericError(constructor, type, args) {
- if (IS_UNDEFINED(args)) {
- args = [];
- }
- var e = new constructor(FormatMessage(type, args));
- e.type = type;
- e.arguments = args;
- return e;
+ if (IS_UNDEFINED(args)) args = [];
+ return new constructor(FormatMessage(type, args));
}
@@ -1143,8 +1138,6 @@
// object. This avoids going through getters and setters defined
// on prototype objects.
%IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
- %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
- %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
@@ -1204,7 +1197,6 @@
function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
- var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 3e726a7..caf544f 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -1,3 +1,4 @@
+
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
@@ -231,6 +232,24 @@
}
+static const int kNoCodeAgeSequenceLength = 7;
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)) =
+ stub->instruction_start();
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -302,6 +321,8 @@
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -328,6 +349,8 @@
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index fd2ff0d..db797ce 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -663,7 +663,9 @@
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ // Code aging
+ CODE_AGE_MARKER_NOP = 6
};
// Type == 0 is the default non-marking nop. For mips this is a
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 0342e65..b2348fc 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1255,6 +1255,48 @@
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ __ mov(a0, ra);
+ // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ // Restore the original return address of the function
+ __ mov(ra, at);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - function object
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ __ MultiPop(saved_regs);
+ __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
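
DEFINE_CODE_AGE_BUILTIN_GENERATOR above is an X-macro: CODE_AGE_LIST invokes a caller-supplied macro once per code age, stamping out one Even/Odd builtin pair per age. A standalone sketch of the pattern (the age names here are illustrative stand-ins for whatever CODE_AGE_LIST actually expands to):

    #include <cstdio>

    #define AGE_LIST(V)    \
      V(Quadragenarian)    \
      V(Quinquagenarian)   \
      V(Sexagenarian)

    // One function is stamped out per list entry, just as the builtin
    // generators above are stamped out per code age.
    #define DEFINE_GENERATOR(C)                     \
      void Make##C##CodeYoungAgain() {              \
        std::printf("made young again: %s\n", #C);  \
      }
    AGE_LIST(DEFINE_GENERATOR)
    #undef DEFINE_GENERATOR

    int main() {
      MakeSexagenarianCodeYoungAgain();  // one of the generated functions
    }
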
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 7f7d70e..f1f921f 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -5322,7 +5322,7 @@
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -6071,7 +6071,7 @@
// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
__ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6288,8 +6288,8 @@
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ascii string. Allocate the result.
@@ -6304,13 +6304,13 @@
__ Addu(t1, t1, a3);
// Locate first character of result.
- __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6442,7 +6442,7 @@
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6597,8 +6597,8 @@
&call_runtime);
// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+ __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6616,7 +6616,7 @@
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6701,11 +6701,11 @@
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6716,11 +6716,11 @@
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6741,7 +6741,7 @@
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
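
Most of the address arithmetic in this stub rewrites constants of the form SeqOneByteString::kHeaderSize - kHeapObjectTag. The pattern exists because V8 tags heap pointers (a tagged pointer is the object's address plus a small tag), so reaching the first character takes one combined constant rather than untag-then-offset. A sketch with illustrative values (the real kHeaderSize depends on the object layout):

    #include <cassert>
    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;  // V8 marks heap pointers via low bits
    const intptr_t kHeaderSize = 12;    // illustrative: map/hash/length fields

    // One combined constant reaches the first character of a tagged string:
    // (address + tag) + (header size - tag) == address + header size.
    intptr_t FirstCharAddress(intptr_t tagged_pointer) {
      return tagged_pointer + (kHeaderSize - kHeapObjectTag);
    }

    int main() {
      intptr_t object = 0x1000;  // untagged object address
      intptr_t tagged = object + kHeapObjectTag;
      assert(FirstCharAddress(tagged) == object + kHeaderSize);
    }
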
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 44e0359..51ad474 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -408,7 +408,7 @@
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -446,6 +446,100 @@
__ bind(&done);
}
+// nop(CODE_AGE_MARKER_NOP)
+static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found in FUNCTIONS
+ static bool initialized = false;
+ static uint32_t sequence[kNoCodeAgeSequenceLength];
+ byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+ *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ if (!initialized) {
+ CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->Push(ra, fp, cp, a1);
+ patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ initialized = true;
+ }
+ return byte_sequence;
+}
+
+
+byte* Code::FindPlatformCodeAgeSequence() {
+ byte* start = instruction_start();
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (!memcmp(start, young_sequence, young_length) ||
+ Memory::uint32_at(start) == kCodeAgePatchFirstInstruction) {
+ return start;
+ } else {
+ byte* start_after_strict = NULL;
+ if (kind() == FUNCTION) {
+ start_after_strict = start + kSizeOfFullCodegenStrictModePrologue;
+ } else {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ start_after_strict = start + kSizeOfOptimizedStrictModePrologue;
+ }
+ ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
+ Memory::uint32_at(start_after_strict) ==
+ kCodeAgePatchFirstInstruction);
+ return start_after_strict;
+ }
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = !memcmp(sequence, young_sequence, young_length);
+ ASSERT(result ||
+ Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ // Mark this code sequence for FindPlatformCodeAgeSequence()
+ patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Save the function's original return address
+ // (it will be clobbered by Call(t9))
+ patcher.masm()->mov(at, ra);
+ // Load the stub address to t9 and call it
+ patcher.masm()->li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+ patcher.masm()->Call(t9);
+ // Record the stub address in the empty space for GetCodeAgeAndParity()
+ patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ }
+}
+
+
#undef __
} } // namespace v8::internal
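
Pulling the pieces of PatchPlatformCodeAge together: the young prologue is overwritten with a seven-instruction sequence whose first word is the recognizable CODE_AGE_MARKER_NOP and whose last word is the raw stub address that GetCodeAgeAndParity reads back. A plausible slot-by-slot breakdown (assuming li expands to a lui/ori pair and Call(t9) to jalr plus its delay slot, which is what makes the count come out at kNoCodeAgeSequenceLength == 7) and a sketch of the read-back:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Plausible layout of the patched sequence (one 32-bit slot each):
    //   [0]    nop(CODE_AGE_MARKER_NOP)  -- recognizable marker
    //   [1]    mov at, ra                -- save the real return address
    //   [2..3] li t9, <stub>             -- lui/ori pair loading the stub
    //   [4..5] jalr t9 (+ delay slot)    -- call the young-again stub
    //   [6]    .word <stub>              -- raw stub address for read-back
    const int kNoCodeAgeSequenceLength = 7;
    const int kInstrSize = 4;

    uint32_t ReadStubAddress(const uint8_t* sequence) {
      // Mirrors GetCodeAgeAndParity: the stub address sits in the last slot.
      uint32_t address;
      std::memcpy(&address,
                  sequence + kInstrSize * (kNoCodeAgeSequenceLength - 1),
                  sizeof(address));
      return address;
    }

    int main() {
      uint8_t patch[kNoCodeAgeSequenceLength * kInstrSize] = {0};
      uint32_t stub = 0x12345678;  // placeholder stub entry point
      std::memcpy(patch + kInstrSize * (kNoCodeAgeSequenceLength - 1),
                  &stub, sizeof(stub));
      assert(ReadStubAddress(patch) == stub);
    }
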
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index e704c4f..27c8a8b 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -36,6 +36,9 @@
namespace v8 {
namespace internal {
+static const int kSizeOfFullCodegenStrictModePrologue = 16;
+static const int kSizeOfOptimizedStrictModePrologue = 16;
+
// Forward declarations
class CompilationInfo;
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 3e89fb4..d408c8b 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -139,7 +139,7 @@
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -158,11 +158,14 @@
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
+ Label begin;
+ __ bind(&begin);
__ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
__ bind(&ok);
+ ASSERT_EQ(kSizeOfFullCodegenStrictModePrologue, ok.pos() - begin.pos());
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -172,12 +175,12 @@
int locals_count = info->scope()->num_stack_slots();
+ // The following three instructions must remain together and unmodified for
+ // code aging to work properly.
__ Push(ra, fp, cp, a1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- }
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
@@ -2402,7 +2405,7 @@
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -3656,7 +3659,7 @@
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -3683,7 +3686,7 @@
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3723,10 +3726,10 @@
array_length = no_reg;
__ Addu(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ li(at, Operand(Smi::FromInt(1)));
__ Branch(&one_char_separator, eq, scratch1, Operand(at));
__ Branch(&long_separator, gt, scratch1, Operand(at));
@@ -3743,7 +3746,7 @@
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
@@ -3753,7 +3756,7 @@
// One-character separator case.
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3775,7 +3778,7 @@
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
@@ -3796,7 +3799,7 @@
__ SmiUntag(string_length);
__ Addu(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
@@ -3804,7 +3807,7 @@
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&long_separator_loop, lt, element, Operand(elements_end));
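
The new ASSERT_EQ(kSizeOfFullCodegenStrictModePrologue, ok.pos() - begin.pos()) check above (and its lithium twin below) pins the strict-mode prologue to a fixed byte length, because FindPlatformCodeAgeSequence skips over it by that constant. A toy illustration of measuring emitted code size with label positions (not V8's Assembler API):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // A toy assembler whose pos() is a byte offset, like Label positions.
    class ToyAssembler {
     public:
      void Emit(uint32_t instr) { buffer_.push_back(instr); }
      int pos() const { return static_cast<int>(buffer_.size()) * 4; }

     private:
      std::vector<uint32_t> buffer_;
    };

    int main() {
      const int kStrictModePrologueSize = 16;  // four 4-byte instructions
      ToyAssembler masm;
      int begin = masm.pos();
      for (int i = 0; i < 4; ++i) masm.Emit(0);  // stand-in prologue
      // Fails the moment the prologue grows without updating the constant --
      // the point of the ASSERT_EQ in the diff.
      assert(masm.pos() - begin == kStrictModePrologueSize);
    }
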
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index f79208e..902736e 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -136,15 +136,23 @@
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
+ Label begin;
+ __ bind(&begin);
__ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = scope()->num_parameters() * kPointerSize;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
__ bind(&ok);
+ ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos());
}
+ // The following three instructions must remain together and unmodified for
+ // code aging to work properly.
__ Push(ra, fp, cp, a1);
+  // Add an unused load of 'at' to ensure the prologue sequence is identical
+  // for full-codegen and lithium-codegen.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
@@ -1140,6 +1148,9 @@
// No need to mask the right operand on MIPS, it is built into the variable
// shift instructions.
switch (instr->op()) {
+ case Token::ROR:
+ __ Ror(result, left, Operand(ToRegister(right_op)));
+ break;
case Token::SAR:
__ srav(result, left, ToRegister(right_op));
break;
@@ -1161,6 +1172,13 @@
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ Ror(result, left, Operand(shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sra(result, left, shift_count);
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index b5eb128..fa84882 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -177,6 +177,7 @@
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
@@ -1099,6 +1100,11 @@
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
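
Token::ROR, wired through DoRor and the shift lowering above, is an ordinary 32-bit rotate right; the diff masks the shift amount to five bits and special-cases zero. The same semantics in portable C++ (the count == 0 guard avoids the undefined 32-bit shift):

    #include <cassert>
    #include <cstdint>

    uint32_t RotateRight(uint32_t value, uint32_t count) {
      count &= 0x1F;  // MIPS masks the shift amount to 5 bits, as above
      if (count == 0) return value;  // avoid the undefined shift by 32 below
      return (value >> count) | (value << (32 - count));
    }

    int main() {
      assert(RotateRight(0x80000001u, 1) == 0xC0000000u);
      assert(RotateRight(0x12345678u, 0) == 0x12345678u);
    }
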
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index aebfe73..11c641d 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3109,9 +3109,9 @@
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 22e6174..563e302 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1554,7 +1554,7 @@
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -2920,7 +2920,7 @@
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -3111,7 +3111,7 @@
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- int index) {
+ PropertyIndex index) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
diff --git a/src/object-observe.js b/src/object-observe.js
index 28aa1f4..ea5de1f 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -27,15 +27,12 @@
"use strict";
-var InternalObjectIsFrozen = $Object.isFrozen;
-var InternalObjectFreeze = $Object.freeze;
-
var observationState = %GetObservationState();
if (IS_UNDEFINED(observationState.observerInfoMap)) {
observationState.observerInfoMap = %CreateObjectHashTable();
observationState.objectInfoMap = %CreateObjectHashTable();
observationState.notifierTargetMap = %CreateObjectHashTable();
- observationState.activeObservers = new InternalArray;
+ observationState.pendingObservers = new InternalArray;
observationState.observerPriority = 0;
}
@@ -74,7 +71,7 @@
throw MakeTypeError("observe_non_object", ["observe"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["observe"]);
- if (InternalObjectIsFrozen(callback))
+ if (ObjectIsFrozen(callback))
throw MakeTypeError("observe_callback_frozen");
if (!observerInfoMap.has(callback)) {
@@ -100,6 +97,8 @@
function ObjectUnobserve(object, callback) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["unobserve"]);
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["unobserve"]);
var objectInfo = objectInfoMap.get(object);
if (IS_UNDEFINED(objectInfo))
@@ -117,7 +116,7 @@
for (var i = 0; i < observers.length; i++) {
var observer = observers[i];
var observerInfo = observerInfoMap.get(observer);
- observationState.activeObservers[observerInfo.priority] = observer;
+ observationState.pendingObservers[observerInfo.priority] = observer;
%SetObserverDeliveryPending();
if (IS_NULL(observerInfo.pendingChangeRecords)) {
observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
@@ -132,7 +131,7 @@
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
- InternalObjectFreeze(changeRecord);
+ ObjectFreeze(changeRecord);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@@ -162,9 +161,11 @@
for (var prop in changeRecord) {
if (prop === 'object')
continue;
- newRecord[prop] = changeRecord[prop];
+
+ %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
+ READ_ONLY + DONT_DELETE);
}
- InternalObjectFreeze(newRecord);
+ ObjectFreeze(newRecord);
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}
@@ -173,7 +174,7 @@
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["getNotifier"]);
- if (InternalObjectIsFrozen(object))
+ if (ObjectIsFrozen(object))
return null;
var objectInfo = objectInfoMap.get(object);
@@ -200,6 +201,7 @@
return;
observerInfo.pendingChangeRecords = null;
+ delete observationState.pendingObservers[observerInfo.priority];
var delivered = [];
%MoveArrayContents(pendingChangeRecords, delivered);
try {
@@ -215,11 +217,11 @@
}
function DeliverChangeRecords() {
- while (observationState.activeObservers.length) {
- var activeObservers = observationState.activeObservers;
- observationState.activeObservers = new InternalArray;
- for (var i in activeObservers) {
- DeliverChangeRecordsForObserver(activeObservers[i]);
+ while (observationState.pendingObservers.length) {
+ var pendingObservers = observationState.pendingObservers;
+ observationState.pendingObservers = new InternalArray;
+ for (var i in pendingObservers) {
+ DeliverChangeRecordsForObserver(pendingObservers[i]);
}
}
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index c2f64d4..e3226c1 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -463,13 +463,13 @@
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
- } else if (IsSeqAsciiString()) {
- SeqAsciiString::cast(this)->SeqAsciiStringVerify();
+ } else if (IsSeqOneByteString()) {
+ SeqOneByteString::cast(this)->SeqOneByteStringVerify();
}
}
-void SeqAsciiString::SeqAsciiStringVerify() {
+void SeqOneByteString::SeqOneByteStringVerify() {
CHECK(String::IsAscii(GetChars(), length()));
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index ab9ce1a..7db9175 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -229,7 +229,7 @@
}
-bool Object::IsSeqAsciiString() {
+bool Object::IsSeqOneByteString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsAsciiRepresentation();
@@ -341,8 +341,7 @@
bool String::HasOnlyAsciiChars() {
uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kOneByteStringTag ||
- (type & kAsciiDataHintMask) == kAsciiDataHintTag;
+ return (type & kAsciiDataHintMask) == kAsciiDataHintTag;
}
@@ -1944,6 +1943,11 @@
}
+double* FixedDoubleArray::data_start() {
+ return &READ_DOUBLE_FIELD(this, kHeaderSize);
+}
+
+
Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
@@ -2354,7 +2358,7 @@
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqAsciiString)
+CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ConsString)
@@ -2459,7 +2463,7 @@
ASSERT(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
case kSeqStringTag | kOneByteStringTag:
- return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
+ return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
case kSeqStringTag | kTwoByteStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
case kConsStringTag | kOneByteStringTag:
@@ -2486,7 +2490,7 @@
ASSERT(StringShape(this).IsSequential());
return this->IsAsciiRepresentation()
- ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
+ ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
: SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}
@@ -2508,25 +2512,25 @@
}
-uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
+uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
+void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
-Address SeqAsciiString::GetCharsAddress() {
+Address SeqOneByteString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-char* SeqAsciiString::GetChars() {
+char* SeqOneByteString::GetChars() {
return reinterpret_cast<char*>(GetCharsAddress());
}
@@ -2558,7 +2562,7 @@
}
-int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
+int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
return SizeFor(length());
}
@@ -2976,8 +2980,8 @@
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
if (instance_type == ASCII_STRING_TYPE) {
- return SeqAsciiString::SizeFor(
- reinterpret_cast<SeqAsciiString*>(this)->length());
+ return SeqOneByteString::SizeFor(
+ reinterpret_cast<SeqOneByteString*>(this)->length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
@@ -3416,66 +3420,6 @@
}
-byte Code::binary_op_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_binary_op_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::binary_op_result_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpResultTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_binary_op_result_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpResultTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::compare_state() {
- ASSERT(is_compare_ic_stub());
- return CompareStateField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_compare_state(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareStateField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::compare_operation() {
- ASSERT(is_compare_ic_stub());
- return CompareOperationField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_compare_operation(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareOperationField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
return ToBooleanStateField::decode(
@@ -4630,10 +4574,45 @@
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
+
+
+// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
+void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+}
+
+
+Object* Code::type_feedback_info() {
+ ASSERT(kind() == FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
+ ASSERT(kind() == FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
+int Code::stub_info() {
+ ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC);
+ Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
+ return Smi::cast(value)->value();
+}
+
+
+void Code::set_stub_info(int value) {
+ ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
+}
+
+
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
+
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
@@ -4814,6 +4793,11 @@
}
+bool JSObject::HasFastElements() {
+ return IsFastElementsKind(GetElementsKind());
+}
+
+
bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}
@@ -5049,6 +5033,10 @@
PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
+ uint32_t index;
+ if (IsJSObject() && key->AsArrayIndex(&index)) {
+ return GetElementAttribute(index);
+ }
return GetPropertyAttributeWithReceiver(this, key);
}
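
The type_feedback_info/stub_info accessors above overlay two meanings on a single Code field: a heap object for FUNCTION code, and a plain integer, stored as a Smi, for COMPARE_IC and BINARY_OP_IC stubs. The Smi::FromInt/Smi::cast round trip relies on V8's small-integer tagging; a simplified sketch of that scheme (illustrative 1-bit tag, as on 32-bit V8):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTag = 0;  // low bit 0 marks a small integer

    intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;  // shift the tag in
    }

    int SmiToInt(intptr_t smi) {
      assert((smi & 1) == kSmiTag);  // heap pointers set the low bit instead
      return static_cast<int>(smi >> kSmiTagSize);
    }

    int main() {
      intptr_t field = SmiFromInt(42);  // what set_stub_info(42) stores
      assert(SmiToInt(field) == 42);    // what stub_info() reads back
    }
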
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 71635ca..4a9dab5 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -68,7 +68,7 @@
SharedFunctionInfo::BodyDescriptor,
int>::Visit);
- table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+ table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
@@ -110,10 +110,7 @@
SlicedString::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
@@ -123,7 +120,7 @@
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
+ table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 6ae4d7c..7082e59 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -46,7 +46,7 @@
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqAsciiString;
+ return kVisitSeqOneByteString;
} else {
return kVisitSeqTwoByteString;
}
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 3937e25..29f3cbc 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -47,7 +47,7 @@
class StaticVisitorBase : public AllStatic {
public:
#define VISITOR_ID_LIST(V) \
- V(SeqAsciiString) \
+ V(SeqOneByteString) \
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
@@ -318,9 +318,9 @@
return JSObjectVisitor::Visit(map, object);
}
- static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
- return SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
+ static inline int VisitSeqOneByteString(Map* map, HeapObject* object) {
+ return SeqOneByteString::cast(object)->
+ SeqOneByteStringSize(map->instance_type());
}
static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
@@ -435,6 +435,10 @@
};
typedef FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ void> FixedArrayVisitor;
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
JSObject::BodyDescriptor,
void> JSObjectVisitor;
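
The table_.Register(kVisitSeqOneByteString, ...) calls being renamed above populate a static function-pointer table indexed by visitor id, so the GC dispatches on object shape without virtual calls. A minimal sketch of that dispatch mechanism (ids and callbacks are illustrative):

    #include <cstdio>

    enum VisitorId {
      kVisitSeqOneByteString,
      kVisitSeqTwoByteString,
      kVisitorIdCount
    };

    typedef int (*Callback)(void* object);

    // A function-pointer table indexed by visitor id: no virtual dispatch.
    struct VisitorTable {
      Callback callbacks[kVisitorIdCount];
      void Register(VisitorId id, Callback cb) { callbacks[id] = cb; }
      int Visit(VisitorId id, void* object) { return callbacks[id](object); }
    };

    static int VisitOneByte(void*) { return 1; }  // real visitors return size
    static int VisitTwoByte(void*) { return 2; }

    int main() {
      VisitorTable table;
      table.Register(kVisitSeqOneByteString, &VisitOneByte);
      table.Register(kVisitSeqTwoByteString, &VisitTwoByte);
      std::printf("%d\n", table.Visit(kVisitSeqOneByteString, nullptr));
    }
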
diff --git a/src/objects.cc b/src/objects.cc
index f8d080d..85b43ba 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -247,6 +247,18 @@
}
+Handle<Object> Object::GetProperty(Handle<Object> object, Handle<String> name) {
+ // TODO(rossberg): The index test should not be here but in the GetProperty
+ // method (or somewhere else entirely). Needs more global clean-up.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) return GetElement(object, index);
+ Isolate* isolate = object->IsHeapObject()
+ ? Handle<HeapObject>::cast(object)->GetIsolate()
+ : Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
+}
+
+
Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
Isolate* isolate = object->IsHeapObject()
? Handle<HeapObject>::cast(object)->GetIsolate()
@@ -889,7 +901,7 @@
result = String::cast(object);
String* first = cs->first();
int first_length = first->length();
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ char* dest = SeqOneByteString::cast(result)->GetChars();
WriteToFlat(first, dest, 0, first_length);
String* second = cs->second();
WriteToFlat(second,
@@ -1678,6 +1690,7 @@
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
MaybeObject* result;
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
!map_of_this->is_extensible()) {
@@ -1685,7 +1698,7 @@
return value;
} else {
Handle<Object> args[1] = {Handle<String>(name)};
- return heap->isolate()->Throw(
+ return isolate->Throw(
*FACTORY->NewTypeError("object_not_extensible",
HandleVector(args, 1)));
}
@@ -1715,11 +1728,13 @@
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this), "new", handle(name),
- handle(heap->the_hole_value()));
+ EnqueueChangeRecord(handle(this, isolate),
+ "new",
+ handle(name, isolate),
+ handle(heap->the_hole_value(), isolate));
}
return *hresult;
@@ -2855,6 +2870,7 @@
StrictModeFlag strict_mode,
StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -2873,7 +2889,7 @@
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(
lookup, name_raw, value_raw, true, strict_mode);
}
@@ -2889,10 +2905,10 @@
// From this point on everything needs to be handlified, because
// SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(GetIsolate());
+ HandleScope scope(isolate);
Handle<JSObject> self(this);
Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
+ Handle<Object> value(value_raw, isolate);
if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
bool done = false;
@@ -2910,16 +2926,16 @@
if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
Handle<Object> args[] = { name, self };
- return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
+ return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
} else {
return *value;
}
}
- Handle<Object> old_value(heap->the_hole_value());
+ Handle<Object> old_value(heap->the_hole_value(), isolate);
if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup->GetLazyValue());
+ old_value = handle(lookup->GetLazyValue(), isolate);
}
// This is a real property that is not read-only, or it is a
@@ -2997,13 +3013,13 @@
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
if (lookup->IsTransition()) {
EnqueueChangeRecord(self, "new", name, old_value);
} else {
- LookupResult new_lookup(self->GetIsolate());
+ LookupResult new_lookup(isolate);
self->LocalLookup(*name, &new_lookup);
ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
@@ -3076,15 +3092,15 @@
}
// From this point on everything needs to be handlified.
- HandleScope scope(GetIsolate());
+ HandleScope scope(isolate);
Handle<JSObject> self(this);
Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
+ Handle<Object> value(value_raw, isolate);
- Handle<Object> old_value(isolate->heap()->the_hole_value());
+ Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
PropertyAttributes old_attributes = ABSENT;
if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup.GetLazyValue());
+ old_value = handle(lookup.GetLazyValue(), isolate);
old_attributes = lookup.GetAttributes();
}
@@ -3146,7 +3162,7 @@
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
if (lookup.IsTransition()) {
@@ -3330,6 +3346,11 @@
return GetElementAttributeWithInterceptor(receiver, index, continue_search);
}
+ // Handle [] on String objects.
+ if (this->IsStringObjectWithCharacterAt(index)) {
+ return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ }
+
return GetElementAttributeWithoutInterceptor(
receiver, index, continue_search);
}
@@ -4132,14 +4153,15 @@
Handle<JSObject> self(this);
Handle<String> name;
- Handle<Object> old_value(isolate->heap()->the_hole_value());
+ Handle<Object> old_value;
bool preexists = false;
if (FLAG_harmony_observation && map()->is_observed()) {
name = isolate->factory()->Uint32ToString(index);
preexists = self->HasLocalElement(index);
if (preexists) {
- // TODO(observe): only read & set old_value if it's not an accessor
- old_value = Object::GetElement(self, index);
+ old_value = GetLocalElementAccessorPair(index) != NULL
+ ? Handle<Object>::cast(isolate->factory()->the_hole_value())
+ : Object::GetElement(self, index);
}
}
@@ -4152,7 +4174,7 @@
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
if (preexists && !self->HasLocalElement(index))
@@ -4217,7 +4239,7 @@
Handle<Object> old_value(isolate->heap()->the_hole_value());
if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup.GetLazyValue());
+ old_value = handle(lookup.GetLazyValue(), isolate);
}
MaybeObject* result;
@@ -4239,7 +4261,7 @@
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
if (!self->HasLocalProperty(*hname))
@@ -4877,7 +4899,7 @@
if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value();
// From this point on everything needs to be handlified.
- HandleScope scope(GetIsolate());
+ HandleScope scope(isolate);
Handle<JSObject> self(this);
Handle<String> name(name_raw);
Handle<Object> getter(getter_raw);
@@ -4886,20 +4908,19 @@
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
- Handle<Object> old_value(isolate->heap()->the_hole_value());
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
bool preexists = false;
if (FLAG_harmony_observation && map()->is_observed()) {
if (is_element) {
preexists = HasLocalElement(index);
- if (preexists) {
- // TODO(observe): distinguish the case where it's an accessor
+ if (preexists && GetLocalElementAccessorPair(index) == NULL) {
old_value = Object::GetElement(self, index);
}
} else {
LookupResult lookup(isolate);
LocalLookup(*name, &lookup);
preexists = lookup.IsProperty();
- if (preexists) old_value = handle(lookup.GetLazyValue());
+ if (preexists) old_value = handle(lookup.GetLazyValue(), isolate);
}
}
@@ -4908,7 +4929,7 @@
self->DefinePropertyAccessor(*name, *getter, *setter, attributes);
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
const char* type = preexists ? "reconfigured" : "new";
@@ -6512,7 +6533,7 @@
if (shape.encoding_tag() == kOneByteStringTag) {
const char* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqAsciiString::cast(string)->GetChars();
+ start = SeqOneByteString::cast(string)->GetChars();
} else {
start = ExternalAsciiString::cast(string)->GetChars();
}
@@ -6676,7 +6697,7 @@
}
-const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
+const unibrow::byte* SeqOneByteString::SeqOneByteStringReadBlock(
unsigned* remaining,
unsigned* offset_ptr,
unsigned max_chars) {
@@ -6804,7 +6825,7 @@
}
-void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+void SeqOneByteString::SeqOneByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
unsigned capacity = rbb->capacity - rbb->cursor;
@@ -6848,8 +6869,8 @@
switch (StringShape(input).representation_tag()) {
case kSeqStringTag:
if (input->IsAsciiRepresentation()) {
- SeqAsciiString* str = SeqAsciiString::cast(input);
- return str->SeqAsciiStringReadBlock(&rbb->remaining,
+ SeqOneByteString* str = SeqOneByteString::cast(input);
+ return str->SeqOneByteStringReadBlock(&rbb->remaining,
offset_ptr,
max_chars);
} else {
@@ -6997,7 +7018,7 @@
switch (StringShape(input).representation_tag()) {
case kSeqStringTag:
if (input->IsAsciiRepresentation()) {
- SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
+ SeqOneByteString::cast(input)->SeqOneByteStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
return;
@@ -7214,7 +7235,7 @@
}
case kOneByteStringTag | kSeqStringTag: {
CopyChars(sink,
- SeqAsciiString::cast(source)->GetChars() + from,
+ SeqOneByteString::cast(source)->GetChars() + from,
to - from);
return;
}
@@ -7249,9 +7270,9 @@
// common case of sequential ascii right child.
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqAsciiString()) {
+ } else if (second->IsSeqOneByteString()) {
CopyChars(sink + boundary - from,
- SeqAsciiString::cast(second)->GetChars(),
+ SeqOneByteString::cast(second)->GetChars(),
to - boundary);
} else {
WriteToFlat(second,
@@ -7390,8 +7411,8 @@
if (StringShape(lhs).IsSequentialAscii() &&
StringShape(rhs).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
- const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
+ const char* str1 = SeqOneByteString::cast(lhs)->GetChars();
+ const char* str2 = SeqOneByteString::cast(rhs)->GetChars();
return CompareRawStringContents(Vector<const char>(str1, len),
Vector<const char>(str2, len));
}
@@ -7519,7 +7540,7 @@
// Compute the hash code.
uint32_t field = 0;
if (StringShape(this).IsSequentialAscii()) {
- field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
+ field = HashSequentialString(SeqOneByteString::cast(this)->GetChars(),
len,
GetHeap()->HashSeed());
} else if (StringShape(this).IsSequentialTwoByte()) {
@@ -8781,6 +8802,7 @@
void Code::ClearTypeFeedbackCells(Heap* heap) {
+ if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackCells* type_feedback_cells =
@@ -8795,7 +8817,8 @@
bool Code::allowed_in_shared_map_code_cache() {
return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS);
+ (is_compare_ic_stub() &&
+ ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECTS);
}
@@ -9150,11 +9173,15 @@
PrintF(out, "argc = %d\n", arguments_count());
}
if (is_compare_ic_stub()) {
- CompareIC::State state = CompareIC::ComputeState(this);
- PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
- }
- if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
- Token::Value op = CompareIC::ComputeOperation(this);
+ ASSERT(major_key() == CodeStub::CompareIC);
+ CompareIC::State left_state, right_state, handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
+ &handler_state, &op);
+ PrintF(out, "compare_state = %s*%s -> %s\n",
+ CompareIC::GetStateName(left_state),
+ CompareIC::GetStateName(right_state),
+ CompareIC::GetStateName(handler_state));
PrintF(out, "compare_operation = %s\n", Token::Name(op));
}
}
@@ -9200,8 +9227,6 @@
PrintF(out, "\n");
}
PrintF(out, "\n");
- // Just print if type feedback info is ever used for optimized code.
- ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
unsigned offset = stack_check_table_offset();
// If there is no stack check table, the "table start" will be at or after
@@ -9378,7 +9403,51 @@
MaybeObject* JSArray::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
- return GetElementsAccessor()->SetLength(this, len);
+ if (!(FLAG_harmony_observation && map()->is_observed()))
+ return GetElementsAccessor()->SetLength(this, len);
+
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSArray> self(this);
+ List<Handle<String> > indices;
+ List<Handle<Object> > old_values;
+ Handle<Object> old_length_handle(self->length());
+ Handle<Object> new_length_handle(len);
+ uint32_t old_length = 0;
+ CHECK(old_length_handle->ToArrayIndex(&old_length));
+ uint32_t new_length = 0;
+ if (!new_length_handle->ToArrayIndex(&new_length))
+ return Failure::InternalError();
+
+ // TODO(adamk): This loop can be very slow for arrays in dictionary mode.
+ // Find another way to iterate over arrays with dictionary elements.
+ for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
+ PropertyAttributes attributes = self->GetLocalElementAttribute(i);
+ if (attributes == ABSENT) continue;
+ // A non-configurable property will cause the truncation operation to
+ // stop at this index.
+ if (attributes == DONT_DELETE) break;
+ // TODO(adamk): Don't fetch the old value if it's an accessor.
+ old_values.Add(Object::GetElement(self, i));
+ indices.Add(isolate->factory()->Uint32ToString(i));
+ }
+
+ MaybeObject* result =
+ self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ CHECK(self->length()->ToArrayIndex(&new_length));
+ if (old_length != new_length) {
+ for (int i = 0; i < indices.length(); ++i) {
+ JSObject::EnqueueChangeRecord(
+ self, "deleted", indices[i], old_values[i]);
+ }
+ JSObject::EnqueueChangeRecord(
+ self, "updated", isolate->factory()->length_symbol(),
+ old_length_handle);
+ }
+ return *hresult;
}
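
The truncation loop above emits one "deleted" change record per existing, configurable element, skipping holes (ABSENT) and stopping at the first non-configurable (DONT_DELETE) index. A standalone sketch of that stopping rule, using placeholder attribute values rather than V8's PropertyAttributes:

#include <cstdint>
#include <cstdio>
#include <vector>

// Placeholder per-element attributes mirroring the cases handled above.
enum Attr { ATTR_ABSENT, ATTR_CONFIGURABLE, ATTR_DONT_DELETE };

// Indices that truncating from elements.size() down to new_length would
// report as deleted. Holes are skipped; a non-configurable element stops
// the walk (it clamps the effective new length).
std::vector<uint32_t> DeletedIndices(const std::vector<Attr>& elements,
                                     uint32_t new_length) {
  std::vector<uint32_t> deleted;
  uint32_t old_length = static_cast<uint32_t>(elements.size());
  // "i + 1 > new_length" avoids unsigned underflow when new_length == 0.
  for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
    if (elements[i] == ATTR_ABSENT) continue;    // hole: nothing to report
    if (elements[i] == ATTR_DONT_DELETE) break;  // truncation stops here
    deleted.push_back(i);
  }
  return deleted;
}

int main() {
  // Element 2 is non-configurable, element 3 is a hole.
  std::vector<Attr> elems = {ATTR_CONFIGURABLE, ATTR_CONFIGURABLE,
                             ATTR_DONT_DELETE, ATTR_ABSENT,
                             ATTR_CONFIGURABLE};
  for (uint32_t i : DeletedIndices(elems, 0)) std::printf("deleted %u\n", i);
  // Prints only "deleted 4": index 3 is skipped, index 2 stops the walk.
}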
@@ -9555,112 +9624,51 @@
}
-JSObject::LocalElementType JSObject::GetLocalElementType(uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return UNDEFINED_ELEMENT;
- }
+PropertyType JSObject::GetLocalPropertyType(String* name) {
+ uint32_t index = 0;
+ if (name->AsArrayIndex(&index)) {
+ return GetLocalElementType(index);
+ }
+ LookupResult lookup(GetIsolate());
+ LocalLookup(name, &lookup);
+ return lookup.type();
+}
+
+
+PropertyType JSObject::GetLocalElementType(uint32_t index) {
+ return GetElementsAccessor()->GetType(this, this, index);
+}
+
+
+AccessorPair* JSObject::GetLocalPropertyAccessorPair(String* name) {
+ uint32_t index = 0;
+ if (name->AsArrayIndex(&index)) {
+ return GetLocalElementAccessorPair(index);
}
+ LookupResult lookup(GetIsolate());
+ LocalLookupRealNamedProperty(name, &lookup);
+
+ if (lookup.IsPropertyCallbacks() &&
+ lookup.GetCallbackObject()->IsAccessorPair()) {
+ return AccessorPair::cast(lookup.GetCallbackObject());
+ }
+ return NULL;
+}
+
+
+AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return UNDEFINED_ELEMENT;
+ if (proto->IsNull()) return NULL;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetLocalElementType(index);
+ return JSObject::cast(proto)->GetLocalElementAccessorPair(index);
}
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return GetElementAttributeWithInterceptor(this, index, false) != ABSENT
- ? INTERCEPTED_ELEMENT : UNDEFINED_ELEMENT;
- }
+ // Check for lookup interceptor.
+ if (HasIndexedInterceptor()) return NULL;
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) {
- return STRING_CHARACTER_ELEMENT;
- }
-
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) {
- return FAST_ELEMENT;
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
- if ((index < length) &&
- !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
- return FAST_ELEMENT;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (element_dictionary()->FindEntry(index) !=
- SeededNumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- // Aliased parameters and non-aliased elements in a fast backing store
- // behave as FAST_ELEMENT. Non-aliased elements in a dictionary
- // backing store behave as DICTIONARY_ELEMENT.
- FixedArray* parameter_map = FixedArray::cast(elements());
- uint32_t length = parameter_map->length();
- Object* probe =
- index < (length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- } else {
- length = arguments->length();
- probe = (index < length) ? arguments->get(index) : NULL;
- if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
- }
- break;
- }
- }
-
- return UNDEFINED_ELEMENT;
+ return GetElementsAccessor()->GetAccessorPair(this, this, index);
}
@@ -10267,25 +10275,21 @@
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(*self, index, v8::ACCESS_SET)) {
- heap->isolate()->ReportFailedAccessCheck(*self, v8::ACCESS_SET);
- return *value;
+ if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return value_raw;
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return *value;
+ if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetElement(index,
- *value,
+ value_raw,
attributes,
strict_mode,
check_prototype,
@@ -10295,7 +10299,7 @@
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { self, number };
+ Handle<Object> args[] = { handle(this, isolate), number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
@@ -10310,23 +10314,27 @@
dictionary->set_requires_slow_elements();
}
+ if (!(FLAG_harmony_observation && map()->is_observed())) {
+ return HasIndexedInterceptor()
+ ? SetElementWithInterceptor(
+ index, value_raw, attributes, strict_mode, check_prototype, set_mode)
+ : SetElementWithoutInterceptor(
+ index, value_raw, attributes, strict_mode, check_prototype, set_mode);
+ }
+
// From here on, everything has to be handlified.
- Handle<String> name;
- Handle<Object> old_value(isolate->heap()->the_hole_value());
- Handle<Object> old_array_length;
- PropertyAttributes old_attributes = ABSENT;
- bool preexists = false;
- if (FLAG_harmony_observation && map()->is_observed()) {
- name = isolate->factory()->Uint32ToString(index);
- preexists = self->HasLocalElement(index);
- if (preexists) {
- old_attributes = self->GetLocalPropertyAttribute(*name);
- // TODO(observe): only read & set old_value if we have a data property
+ Handle<JSObject> self(this);
+ Handle<Object> value(value_raw);
+ PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ Handle<Object> old_length;
+
+ if (old_attributes != ABSENT) {
+ if (GetLocalElementAccessorPair(index) == NULL)
old_value = Object::GetElement(self, index);
- } else if (self->IsJSArray()) {
- // Store old array length in case adding an element grows the array.
- old_array_length = handle(Handle<JSArray>::cast(self)->length());
- }
+ } else if (self->IsJSArray()) {
+ // Store old array length in case adding an element grows the array.
+ old_length = handle(Handle<JSArray>::cast(self)->length(), isolate);
}
// Check for lookup interceptor
@@ -10337,25 +10345,21 @@
index, *value, attributes, strict_mode, check_prototype, set_mode);
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
- if (FLAG_harmony_observation && map()->is_observed()) {
- PropertyAttributes new_attributes = self->GetLocalPropertyAttribute(*name);
- if (!preexists) {
- EnqueueChangeRecord(self, "new", name, old_value);
- if (self->IsJSArray() &&
- !old_array_length->SameValue(Handle<JSArray>::cast(self)->length())) {
- EnqueueChangeRecord(self, "updated",
- isolate->factory()->length_symbol(),
- old_array_length);
- }
- } else if (new_attributes != old_attributes || old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else {
- Handle<Object> new_value = Object::GetElement(self, index);
- if (!new_value->SameValue(*old_value))
- EnqueueChangeRecord(self, "updated", name, old_value);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
+ if (old_attributes == ABSENT) {
+ EnqueueChangeRecord(self, "new", name, old_value);
+ if (self->IsJSArray() &&
+ !old_length->SameValue(Handle<JSArray>::cast(self)->length())) {
+ EnqueueChangeRecord(
+ self, "updated", isolate->factory()->length_symbol(), old_length);
}
+ } else if (old_attributes != new_attributes || old_value->IsTheHole()) {
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ } else if (!old_value->SameValue(*Object::GetElement(self, index))) {
+ EnqueueChangeRecord(self, "updated", name, old_value);
}
return *hresult;
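
After the write, the observed path above classifies the mutation into one of three change-record types (the extra length record for a growing JSArray is handled separately). A compact restatement of the branch structure — a hypothetical helper, not a V8 function:

// Returns the change type to enqueue, or null when no record is needed.
const char* ClassifyElementWrite(bool was_present,
                                 bool attributes_changed,
                                 bool old_value_was_hole,  // e.g. an accessor
                                 bool value_changed) {
  if (!was_present) return "new";
  if (attributes_changed || old_value_was_hole) return "reconfigured";
  if (value_changed) return "updated";
  return nullptr;  // same value, same attributes: nothing to report
}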
@@ -11575,7 +11579,7 @@
class SubStringAsciiSymbolKey : public HashTableKey {
public:
- explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
+ explicit SubStringAsciiSymbolKey(Handle<SeqOneByteString> string,
int from,
int length,
uint32_t seed)
@@ -11596,7 +11600,7 @@
// chance this is an array index.
while (i < length_ && hasher.is_array_index()) {
hasher.AddCharacter(static_cast<uc32>(
- string_->SeqAsciiStringGet(i + from_)));
+ string_->SeqOneByteStringGet(i + from_)));
i++;
}
@@ -11604,7 +11608,7 @@
// index.
while (i < length_) {
hasher.AddCharacterNoIndex(static_cast<uc32>(
- string_->SeqAsciiStringGet(i + from_)));
+ string_->SeqOneByteStringGet(i + from_)));
i++;
}
hash_field_ = hasher.GetHashField();
@@ -11632,7 +11636,7 @@
}
private:
- Handle<SeqAsciiString> string_;
+ Handle<SeqOneByteString> string_;
int from_;
int length_;
uint32_t hash_field_;
@@ -12557,10 +12561,11 @@
}
-MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
- int from,
- int length,
- Object** s) {
+MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(
+ Handle<SeqOneByteString> str,
+ int from,
+ int length,
+ Object** s) {
SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
diff --git a/src/objects.h b/src/objects.h
index 3dc6188..2881ec8 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -97,7 +97,7 @@
// - ExternalFloatArray
// - String
// - SeqString
-// - SeqAsciiString
+// - SeqOneByteString
// - SeqTwoByteString
// - SlicedString
// - ConsString
@@ -530,39 +530,46 @@
enum InstanceType {
// String types.
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
- ASCII_SYMBOL_TYPE = kOneByteStringTag | kSymbolTag | kSeqStringTag,
+ ASCII_SYMBOL_TYPE = kOneByteStringTag | kAsciiDataHintTag | kSymbolTag |
+ kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
- CONS_ASCII_SYMBOL_TYPE = kOneByteStringTag | kSymbolTag | kConsStringTag,
+ CONS_ASCII_SYMBOL_TYPE = kOneByteStringTag | kAsciiDataHintTag | kSymbolTag |
+ kConsStringTag,
SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
kExternalStringTag | kShortExternalStringTag,
SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag |
kAsciiDataHintTag | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kOneByteStringTag | kExternalStringTag |
- kSymbolTag | kShortExternalStringTag,
+ SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kOneByteStringTag | kAsciiDataHintTag |
+ kExternalStringTag | kSymbolTag |
+ kShortExternalStringTag,
EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_SYMBOL_TYPE =
- kOneByteStringTag | kSymbolTag | kExternalStringTag,
+ kOneByteStringTag | kAsciiDataHintTag | kSymbolTag | kExternalStringTag,
STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kOneByteStringTag | kSeqStringTag,
+ ASCII_STRING_TYPE = kOneByteStringTag | kAsciiDataHintTag | kSeqStringTag,
CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE = kOneByteStringTag | kConsStringTag,
+ CONS_ASCII_STRING_TYPE =
+ kOneByteStringTag | kAsciiDataHintTag | kConsStringTag,
SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
- SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag,
+ SLICED_ASCII_STRING_TYPE =
+ kOneByteStringTag | kAsciiDataHintTag | kSlicedStringTag,
SHORT_EXTERNAL_STRING_TYPE =
kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag |
kAsciiDataHintTag | kShortExternalStringTag,
SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kOneByteStringTag | kExternalStringTag | kShortExternalStringTag,
+ kOneByteStringTag | kAsciiDataHintTag |
+ kExternalStringTag | kShortExternalStringTag,
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
// LAST_STRING_TYPE
- EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag,
+ EXTERNAL_ASCII_STRING_TYPE =
+ kOneByteStringTag | kAsciiDataHintTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
// Objects allocated in their own spaces (never in new space).
@@ -782,9 +789,9 @@
}
template<typename T>
- inline bool ToHandle(Handle<T>* obj) {
+ inline bool ToHandle(Handle<T>* obj, Isolate* isolate) {
if (IsFailure()) return false;
- *obj = handle(T::cast(reinterpret_cast<Object*>(this)));
+ *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate);
return true;
}
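
This signature change is part of a broader pattern in this commit: handle creation takes the Isolate explicitly instead of recovering the current one from thread-local state. A toy model of the shape of that change, with stand-in types rather than V8's:

struct Isolate {};  // stand-in: the real one owns the handle storage

template <typename T>
struct Handle {
  T* location_;
};

// Failure-or-value result in the style of MaybeObject above.
template <typename T>
struct MaybeValue {
  T* value_;  // null encodes failure
  bool ToHandle(Handle<T>* obj, Isolate* isolate) {
    if (value_ == nullptr) return false;
    (void)isolate;  // the real code allocates the handle in this isolate
    obj->location_ = value_;
    return true;
  }
};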
@@ -822,7 +829,7 @@
V(ExternalTwoByteString) \
V(ExternalAsciiString) \
V(SeqTwoByteString) \
- V(SeqAsciiString) \
+ V(SeqOneByteString) \
\
V(ExternalArray) \
V(ExternalByteArray) \
@@ -966,6 +973,7 @@
String* key,
PropertyAttributes* attributes);
+ static Handle<Object> GetProperty(Handle<Object> object, Handle<String> key);
static Handle<Object> GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
@@ -1567,6 +1575,8 @@
// Returns true if an object has elements of FAST_ELEMENTS or
// FAST_SMI_ONLY_ELEMENTS.
inline bool HasFastSmiOrObjectElements();
+ // Returns true if an object has any of the fast elements kinds.
+ inline bool HasFastElements();
// Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
// ElementsKind.
inline bool HasFastDoubleElements();
@@ -1835,25 +1845,12 @@
return old_capacity + (old_capacity >> 1) + 16;
}
- // Tells whether the index'th element is present and how it is stored.
- enum LocalElementType {
- // There is no element with given index.
- UNDEFINED_ELEMENT,
+ PropertyType GetLocalPropertyType(String* name);
+ PropertyType GetLocalElementType(uint32_t index);
- // Element with given index is handled by interceptor.
- INTERCEPTED_ELEMENT,
-
- // Element with given index is character in string.
- STRING_CHARACTER_ELEMENT,
-
- // Element with given index is stored in fast backing store.
- FAST_ELEMENT,
-
- // Element with given index is stored in slow backing store.
- DICTIONARY_ELEMENT
- };
-
- LocalElementType GetLocalElementType(uint32_t index);
+ // These methods do not perform access checks!
+ AccessorPair* GetLocalPropertyAccessorPair(String* name);
+ AccessorPair* GetLocalElementAccessorPair(uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
@@ -1880,7 +1877,7 @@
StrictModeFlag strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
- static MUST_USE_RESULT Handle<Object> SetElement(
+ static Handle<Object> SetElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
@@ -2372,12 +2369,12 @@
inline void set_unchecked(Heap* heap, int index, Object* value,
WriteBarrierMode mode);
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
inline Object** GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2479,6 +2476,9 @@
return kHeaderSize + length * kDoubleSize;
}
+ // Gives access to raw memory which stores the array's data.
+ inline double* data_start();
+
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
@@ -3020,7 +3020,7 @@
static const int kEntrySize = 1;
};
-class SeqAsciiString;
+class SeqOneByteString;
// SymbolTable.
//
@@ -3036,7 +3036,7 @@
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
Object** s);
MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
- Handle<SeqAsciiString> str,
+ Handle<SeqOneByteString> str,
int from,
int length,
Object** s);
@@ -4306,8 +4306,12 @@
DECL_ACCESSORS(deoptimization_data, FixedArray)
// [type_feedback_info]: Struct containing type feedback information.
- // Will contain either a TypeFeedbackInfo object, or undefined.
+ // STUBs can use this slot to store arbitrary information as a Smi.
+ // Will contain either a TypeFeedbackInfo object, or undefined, or a Smi.
DECL_ACCESSORS(type_feedback_info, Object)
+ inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
+ inline int stub_info();
+ inline void set_stub_info(int info);
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field do not have to be traced during garbage collection since
@@ -4413,21 +4417,6 @@
inline byte unary_op_type();
inline void set_unary_op_type(byte value);
- // [type-recording binary op type]: For kind BINARY_OP_IC.
- inline byte binary_op_type();
- inline void set_binary_op_type(byte value);
- inline byte binary_op_result_type();
- inline void set_binary_op_result_type(byte value);
-
- // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
- inline byte compare_state();
- inline void set_compare_state(byte value);
-
- // [compare_operation]: For kind COMPARE_IC tells what compare operation the
- // stub was generated for.
- inline byte compare_operation();
- inline void set_compare_operation(byte value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
@@ -4637,18 +4626,6 @@
static const int kUnaryOpTypeFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kUnaryOpTypeBitCount = 3;
- static const int kBinaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kBinaryOpTypeBitCount = 3;
- static const int kBinaryOpResultTypeFirstBit =
- kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
- static const int kBinaryOpResultTypeBitCount = 3;
- static const int kCompareStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kCompareStateBitCount = 3;
- static const int kCompareOperationFirstBit =
- kCompareStateFirstBit + kCompareStateBitCount;
- static const int kCompareOperationBitCount = 4;
static const int kToBooleanStateFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kToBooleanStateBitCount = 8;
@@ -4658,11 +4635,6 @@
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
- kBinaryOpResultTypeBitCount <= 32);
- STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
- STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
@@ -4670,14 +4642,6 @@
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
class UnaryOpTypeField: public BitField<int,
kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpTypeField: public BitField<int,
- kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpResultTypeField: public BitField<int,
- kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
- class CompareStateField: public BitField<int,
- kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
- class CompareOperationField: public BitField<int,
- kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
class ToBooleanStateField: public BitField<int,
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
@@ -7600,13 +7564,13 @@
// The AsciiString class captures sequential ASCII string objects.
// Each character in the AsciiString is an ASCII character.
-class SeqAsciiString: public SeqString {
+class SeqOneByteString: public SeqString {
public:
static const bool kHasAsciiEncoding = true;
// Dispatched behavior.
- inline uint16_t SeqAsciiStringGet(int index);
- inline void SeqAsciiStringSet(int index, uint16_t value);
+ inline uint16_t SeqOneByteStringGet(int index);
+ inline void SeqOneByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
inline Address GetCharsAddress();
@@ -7614,12 +7578,12 @@
inline char* GetChars();
// Casting
- static inline SeqAsciiString* cast(Object* obj);
+ static inline SeqOneByteString* cast(Object* obj);
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an AsciiString
// instance.
- inline int SeqAsciiStringSize(InstanceType instance_type);
+ inline int SeqOneByteStringSize(InstanceType instance_type);
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
@@ -7633,17 +7597,17 @@
static const int kMaxLength = (kMaxSize - kHeaderSize);
// Support for StringInputBuffer.
- inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ inline void SeqOneByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset,
unsigned chars);
- inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
+ inline const unibrow::byte* SeqOneByteStringReadBlock(unsigned* remaining,
unsigned* offset,
unsigned chars);
- DECLARE_VERIFIER(SeqAsciiString)
+ DECLARE_VERIFIER(SeqOneByteString)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
};
@@ -8381,6 +8345,7 @@
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
// Set the content of the array to the content of storage.
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 06018dd..48ee12c 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -72,7 +72,13 @@
USE(status);
output_queue_.Enqueue(optimizing_compiler);
- isolate_->stack_guard()->RequestCodeReadyEvent();
+ if (!FLAG_manual_parallel_recompilation) {
+ isolate_->stack_guard()->RequestCodeReadyEvent();
+ } else {
+ // In manual mode, do not trigger a code ready event.
+ // Instead, wait for the optimized functions to be installed manually.
+ output_queue_semaphore_->Signal();
+ }
if (FLAG_trace_parallel_recompilation) {
time_spent_compiling_ += OS::Ticks() - compiling_start;
@@ -99,6 +105,9 @@
HandleScope handle_scope(isolate_);
int functions_installed = 0;
while (!output_queue_.IsEmpty()) {
+ if (FLAG_manual_parallel_recompilation) {
+ output_queue_semaphore_->Wait();
+ }
OptimizingCompiler* compiler = NULL;
output_queue_.Dequeue(&compiler);
Compiler::InstallOptimizedCode(compiler);
@@ -110,6 +119,17 @@
}
+Handle<SharedFunctionInfo>
+ OptimizingCompilerThread::InstallNextOptimizedFunction() {
+ ASSERT(FLAG_manual_parallel_recompilation);
+ output_queue_semaphore_->Wait();
+ OptimizingCompiler* compiler = NULL;
+ output_queue_.Dequeue(&compiler);
+ Compiler::InstallOptimizedCode(compiler);
+ return compiler->info()->shared_info();
+}
+
+
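In manual mode the compile thread signals output_queue_semaphore_ once per finished job, and InstallNextOptimizedFunction above blocks on it before dequeuing. A generic sketch of this handoff, using standard C++11 primitives in place of V8's Semaphore and UnboundQueue:

#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>

template <typename Job>
class OutputQueue {
 public:
  // Called from the compile thread: enqueue and wake one waiter,
  // playing the role of output_queue_.Enqueue + Signal() above.
  void Enqueue(Job job) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(std::move(job));
    ready_.notify_one();
  }
  // Called from the main thread: block until a job is available,
  // playing the role of Wait() + output_queue_.Dequeue above.
  Job WaitAndDequeue() {
    std::unique_lock<std::mutex> lock(mutex_);
    ready_.wait(lock, [this] { return !queue_.empty(); });
    Job job = std::move(queue_.front());
    queue_.pop();
    return job;
  }

 private:
  std::mutex mutex_;
  std::condition_variable ready_;
  std::queue<Job> queue_;
};
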
void OptimizingCompilerThread::QueueForOptimization(
OptimizingCompiler* optimizing_compiler) {
input_queue_.Enqueue(optimizing_compiler);
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index d562726..8a39b39 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -29,8 +29,8 @@
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "atomicops.h"
-#include "platform.h"
#include "flags.h"
+#include "platform.h"
#include "unbound-queue.h"
namespace v8 {
@@ -38,6 +38,7 @@
class HGraphBuilder;
class OptimizingCompiler;
+class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
public:
@@ -46,6 +47,7 @@
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
input_queue_semaphore_(OS::CreateSemaphore(0)),
+ output_queue_semaphore_(OS::CreateSemaphore(0)),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
@@ -57,6 +59,9 @@
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
+ // Wait for the next optimized function and install it.
+ Handle<SharedFunctionInfo> InstallNextOptimizedFunction();
+
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
// after.
@@ -77,6 +82,7 @@
~OptimizingCompilerThread() {
delete input_queue_semaphore_;
+ delete output_queue_semaphore_; // Only used for manual mode.
delete stop_semaphore_;
}
@@ -84,6 +90,7 @@
Isolate* isolate_;
Semaphore* stop_semaphore_;
Semaphore* input_queue_semaphore_;
+ Semaphore* output_queue_semaphore_;
UnboundQueue<OptimizingCompiler*> input_queue_;
UnboundQueue<OptimizingCompiler*> output_queue_;
volatile AtomicWord stop_thread_;
diff --git a/src/parser.cc b/src/parser.cc
index 129bd95..16f41c2 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -3203,7 +3203,8 @@
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->ToBoolean()->IsTrue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
+ Handle<Object> result(isolate()->heap()->ToBoolean(!condition),
+ isolate());
return factory()->NewLiteral(result);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
@@ -3715,7 +3716,6 @@
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
ElementsKind elements_kind = FAST_SMI_ELEMENTS;
- bool has_only_undefined_values = true;
bool has_hole_values = false;
// Fill in the literals.
@@ -3747,7 +3747,6 @@
// FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
// the tagged value, no matter what the ElementsKind is in case we
// ultimately end up in FAST_ELEMENTS.
- has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
if (elements_kind == FAST_SMI_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
@@ -3786,13 +3785,6 @@
}
}
- // Very small array literals that don't have a concrete hint about their type
- // from a constant value should default to the slow case to avoid lots of
- // elements transitions on really small objects.
- if (has_only_undefined_values && values->length() <= 2) {
- elements_kind = FAST_ELEMENTS;
- }
-
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 24e256a..028aae3 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -699,7 +699,7 @@
memset(&context, 0, sizeof(context));
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
@@ -720,6 +720,7 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+ CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
@@ -774,6 +775,11 @@
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -788,4 +794,12 @@
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 1da4605..3ec02b7 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -685,7 +685,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -707,6 +707,7 @@
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
@@ -890,6 +891,11 @@
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -904,4 +910,12 @@
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index ec48d63..5609af0 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1025,6 +1025,7 @@
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
+
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
@@ -1039,7 +1040,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -1075,16 +1076,74 @@
#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
+class CpuProfilerSignalHandler {
+ public:
+ static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+ static void TearDown() { delete mutex_; }
+
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ ScopedLock lock(mutex_);
+ if (signal_handler_installed_counter_ > 0) {
+ signal_handler_installed_counter_++;
+ return;
+ }
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0) {
+ signal_handler_installed_counter_++;
+ }
+ }
+
+ static void RestoreSignalHandler() {
+ ScopedLock lock(mutex_);
+ if (signal_handler_installed_counter_ == 0)
+ return;
+ if (signal_handler_installed_counter_ == 1) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ }
+ signal_handler_installed_counter_--;
+ }
+
+ static bool signal_handler_installed() {
+ return signal_handler_installed_counter_ > 0;
+ }
+
+ private:
+ static int signal_handler_installed_counter_;
+ static struct sigaction old_signal_handler_;
+ static Mutex* mutex_;
+};
+
+
+int CpuProfilerSignalHandler::signal_handler_installed_counter_ = 0;
+struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
+Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
+
+
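The class above reference-counts installation so that overlapping profiling sessions share a single SIGPROF handler, and only the last RestoreSignalHandler reinstates the previous one. A minimal POSIX sketch of the same counting scheme (plain pthreads, no V8 types):

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
static int g_install_count = 0;         // how many sessions are active
static struct sigaction g_old_handler;  // saved by the first install

static void ProfilerHandler(int, siginfo_t*, void*) { /* take a sample */ }

void InstallHandler() {
  pthread_mutex_lock(&g_mutex);
  if (g_install_count++ == 0) {  // first session installs the real handler
    struct sigaction sa;
    sa.sa_sigaction = ProfilerHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
    if (sigaction(SIGPROF, &sa, &g_old_handler) != 0) g_install_count--;
  }
  pthread_mutex_unlock(&g_mutex);
}

void RestoreHandler() {
  pthread_mutex_lock(&g_mutex);
  if (g_install_count > 0 && --g_install_count == 0) {
    sigaction(SIGPROF, &g_old_handler, 0);  // last session restores
  }
  pthread_mutex_unlock(&g_mutex);
}
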
class Sampler::PlatformData : public Malloced {
public:
- PlatformData() : vm_tid_(GetThreadID()) {}
+ PlatformData()
+ : vm_tgid_(getpid()),
+ vm_tid_(GetThreadID()) {}
- int vm_tid() const { return vm_tid_; }
+ void SendProfilingSignal() {
+ if (!CpuProfilerSignalHandler::signal_handler_installed()) return;
+ // Glibc doesn't provide a wrapper for tgkill(2).
+#if defined(ANDROID)
+ syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+#else
+ syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+#endif
+ }
private:
+ const int vm_tgid_;
const int vm_tid_;
};
@@ -1100,28 +1159,11 @@
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
@@ -1142,7 +1184,6 @@
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
- RestoreSignalHandler();
}
}
@@ -1154,18 +1195,13 @@
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
return;
}
Sleep(HALF_INTERVAL);
@@ -1175,8 +1211,7 @@
Sleep(HALF_INTERVAL);
} else {
if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
return;
}
}
@@ -1191,10 +1226,9 @@
}
}
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ static void DoCpuProfile(Sampler* sampler, void*) {
if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ sampler->platform_data()->SendProfilingSignal();
}
static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
@@ -1202,16 +1236,6 @@
sampler->isolate()->runtime_profiler()->NotifyTick();
}
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
-#endif
- }
-
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
@@ -1234,15 +1258,12 @@
#endif // ANDROID
}
- const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
@@ -1251,8 +1272,6 @@
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
@@ -1280,11 +1299,13 @@
}
#endif
SignalSender::SetUp();
+ CpuProfilerSignalHandler::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
+ CpuProfilerSignalHandler::TearDown();
delete limit_mutex;
}
@@ -1294,6 +1315,7 @@
interval_(interval),
profiling_(false),
active_(false),
+ has_processing_thread_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
@@ -1305,6 +1327,11 @@
}
+void Sampler::DoSample() {
+ platform_data()->SendProfilingSignal();
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -1319,4 +1346,14 @@
}
+void Sampler::StartSampling() {
+ CpuProfilerSignalHandler::InstallSignalHandler();
+}
+
+
+void Sampler::StopSampling() {
+ CpuProfilerSignalHandler::RestoreSignalHandler();
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 22d2bcf..01dcb1d 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -825,7 +825,7 @@
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
@@ -863,6 +863,7 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+ CpuProfiler::FinishTickSampleEvent(sampler->isolate());
thread_resume(profiled_thread);
}
@@ -915,6 +916,11 @@
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -929,4 +935,12 @@
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index ccd2123..a42c5d4 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -514,4 +514,12 @@
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 292927b..b9f133e 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -738,7 +738,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -768,6 +768,7 @@
#endif // __NetBSD__
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
@@ -970,6 +971,11 @@
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -984,4 +990,12 @@
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 5652741..7431e80 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -672,7 +672,7 @@
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
@@ -686,6 +686,7 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
class Sampler::PlatformData : public Malloced {
@@ -889,6 +890,11 @@
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -902,4 +908,13 @@
SetActive(false);
}
+
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index bb3ff2a..27433b2 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -2045,7 +2045,7 @@
memset(&context, 0, sizeof(context));
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
@@ -2066,6 +2066,7 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+ CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
@@ -2120,6 +2121,11 @@
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
@@ -2134,4 +2140,12 @@
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
index 2ac63ff..d4c91b4 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -749,6 +749,9 @@
IncSamplesTaken();
}
+ // Performs platform-specific stack sampling.
+ void DoSample();
+
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
@@ -757,10 +760,28 @@
void Start();
void Stop();
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+ // Whether the sampling thread should use this Sampler for CPU profiling.
+ bool IsProfiling() const {
+ return NoBarrier_Load(&profiling_) > 0 &&
+ !NoBarrier_Load(&has_processing_thread_);
+ }
+ // Perform platform-specific initialization before DoSample() may be invoked.
+ void StartSampling();
+ // Perform platform-specific cleanup after sampling.
+ void StopSampling();
+ void IncreaseProfilingDepth() {
+ if (NoBarrier_AtomicIncrement(&profiling_, 1) == 1) {
+ StartSampling();
+ }
+ }
+ void DecreaseProfilingDepth() {
+ if (!NoBarrier_AtomicIncrement(&profiling_, -1)) {
+ StopSampling();
+ }
+ }
+ void SetHasProcessingThread(bool value) {
+ NoBarrier_Store(&has_processing_thread_, value);
+ }
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
@@ -787,6 +808,7 @@
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
+ Atomic32 has_processing_thread_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
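
In the platform.h hunk above, IsProfiling additionally requires that no dedicated processing thread is attached, and the depth counters call StartSampling/StopSampling exactly on the 0-to-1 and 1-to-0 transitions. A sketch of that edge-triggered counter with std::atomic (the original tests the post-increment value returned by NoBarrier_AtomicIncrement; fetch_add/fetch_sub return the previous value instead):

#include <atomic>

class ProfilingDepth {
 public:
  void Increase() {
    // fetch_add returns the previous value, so 0 here means a 0 -> 1 edge.
    if (depth_.fetch_add(1) == 0) StartSampling();
  }
  void Decrease() {
    // A previous value of 1 means a 1 -> 0 edge.
    if (depth_.fetch_sub(1) == 1) StopSampling();
  }
  bool IsProfiling() const {
    // Used by the sampling thread only when profiling is requested and
    // no dedicated processing thread has taken over.
    return depth_.load() > 0 && !has_processing_thread_.load();
  }
  void SetHasProcessingThread(bool value) {
    has_processing_thread_.store(value);
  }

 private:
  void StartSampling() { /* platform-specific setup, as above */ }
  void StopSampling() { /* platform-specific teardown */ }
  std::atomic<int> depth_{0};
  std::atomic<bool> has_processing_thread_{false};
};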
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 25ce2ba..85b6544 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1774,7 +1774,14 @@
const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
switch (object->map()->instance_type()) {
- case MAP_TYPE: return "system / Map";
+ case MAP_TYPE:
+ switch (Map::cast(object)->instance_type()) {
+#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
+ case instance_type: return "system / Map (" #Name ")";
+ STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
+#undef MAKE_STRING_MAP_CASE
+ default: return "system / Map";
+ }
case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
case FOREIGN_TYPE: return "system / Foreign";
case ODDBALL_TYPE: return "system / Oddball";
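
MAKE_STRING_MAP_CASE relies on STRING_TYPE_LIST, V8's X-macro list of string instance types, where each entry supplies (instance_type, size, name, Name). A self-contained illustration of the pattern with a toy list in place of the real one:

#include <cstdio>

// Toy X-macro list in the style of STRING_TYPE_LIST: the client macro
// picks out whichever of the four fields it needs.
#define TOY_STRING_TYPE_LIST(V) \
  V(1, 16, ascii, Ascii)        \
  V(2, 32, two_byte, TwoByte)   \
  V(3, 16, cons, Cons)

const char* MapName(int instance_type) {
  switch (instance_type) {
#define MAKE_CASE(instance_type, size, name, Name) \
  case instance_type:                              \
    return "system / Map (" #Name ")";
    TOY_STRING_TYPE_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      return "system / Map";
  }
}

int main() {
  std::printf("%s\n", MapName(2));  // prints: system / Map (TwoByte)
}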
@@ -1851,7 +1858,6 @@
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
ExtractStringReferences(entry, String::cast(obj));
- extract_indexed_refs = false;
} else if (obj->IsContext()) {
ExtractContextReferences(entry, Context::cast(obj));
} else if (obj->IsMap()) {
@@ -1966,11 +1972,14 @@
void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
if (string->IsConsString()) {
ConsString* cs = ConsString::cast(string);
- SetInternalReference(cs, entry, "first", cs->first());
- SetInternalReference(cs, entry, "second", cs->second());
+ SetInternalReference(cs, entry, "first", cs->first(),
+ ConsString::kFirstOffset);
+ SetInternalReference(cs, entry, "second", cs->second(),
+ ConsString::kSecondOffset);
} else if (string->IsSlicedString()) {
SlicedString* ss = SlicedString::cast(string);
- SetInternalReference(ss, entry, "parent", ss->parent());
+ SetInternalReference(ss, entry, "parent", ss->parent(),
+ SlicedString::kParentOffset);
}
}
@@ -2130,9 +2139,11 @@
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
Code::kDeoptimizationDataOffset);
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
+ if (code->kind() == Code::FUNCTION) {
+ SetInternalReference(code, entry,
+ "type_feedback_info", code->type_feedback_info(),
+ Code::kTypeFeedbackInfoOffset);
+ }
SetInternalReference(code, entry,
"gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 82ba34d..2ce5cfb 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -83,8 +83,8 @@
const char* data = ExternalAsciiString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
} else {
- ASSERT(subject->IsSeqAsciiString());
- char* data = SeqAsciiString::cast(subject)->GetChars();
+ ASSERT(subject->IsSeqOneByteString());
+ char* data = SeqOneByteString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
}
return address + start_index;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 23f41fa..5b27333 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -140,6 +140,9 @@
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
+ // If we are in manual mode, don't auto-optimize anything.
+ if (FLAG_manual_parallel_recompilation) return;
+
if (FLAG_trace_opt) {
PrintF("[marking ");
function->PrintName();
diff --git a/src/runtime.cc b/src/runtime.cc
index de74f2c..b16c685 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -977,97 +977,109 @@
Object* proto = obj->GetPrototype();
if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype())
+ JSObject::cast(proto)->map()->is_hidden_prototype()) {
GetOwnPropertyImplementation(JSObject::cast(proto),
name, result);
+ }
}
-static bool CheckAccessException(LookupResult* result,
+static bool CheckAccessException(Object* callback,
v8::AccessType access_type) {
- if (result->type() == CALLBACKS) {
- Object* callback = result->GetCallbackObject();
- if (callback->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(callback);
- bool can_access =
- (access_type == v8::ACCESS_HAS &&
- (info->all_can_read() || info->all_can_write())) ||
- (access_type == v8::ACCESS_GET && info->all_can_read()) ||
- (access_type == v8::ACCESS_SET && info->all_can_write());
- return can_access;
- }
+ if (callback->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(callback);
+ return
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
}
-
return false;
}
-static bool CheckAccess(JSObject* obj,
- String* name,
- LookupResult* result,
- v8::AccessType access_type) {
- ASSERT(result->IsProperty());
-
- JSObject* holder = result->holder();
- JSObject* current = obj;
- Isolate* isolate = obj->GetIsolate();
- while (true) {
+template<class Key>
+static bool CheckGenericAccess(
+ JSObject* receiver,
+ JSObject* holder,
+ Key key,
+ v8::AccessType access_type,
+ bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) {
+ Isolate* isolate = receiver->GetIsolate();
+ for (JSObject* current = receiver;
+ true;
+ current = JSObject::cast(current->GetPrototype())) {
if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(current, name, access_type)) {
- // Access check callback denied the access, but some properties
- // can have a special permissions which override callbacks descision
- // (currently see v8::AccessControl).
- break;
+ !(isolate->*mayAccess)(current, key, access_type)) {
+ return false;
}
+ if (current == holder) break;
+ }
+ return true;
+}
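
CheckGenericAccess above unifies the named and indexed prototype-chain walks by parameterizing over the key type and an Isolate member-function pointer (&Isolate::MayNamedAccess or &Isolate::MayIndexedAccess). A toy version of that dispatch, with stand-in types rather than V8's:

struct Obj;

struct Checker {
  bool MayNamed(Obj*, const char*) { return true; }
  bool MayIndexed(Obj*, unsigned key) { return key < 100; }
};

struct Obj {
  Obj* prototype = nullptr;  // the chain ends at the holder
};

template <class Key>
bool CheckChain(Checker* checker, Obj* receiver, Obj* holder, Key key,
                bool (Checker::*may_access)(Obj*, Key)) {
  // Every object from the receiver up to and including the holder must
  // grant access.
  for (Obj* current = receiver; ; current = current->prototype) {
    if (!(checker->*may_access)(current, key)) return false;
    if (current == holder) break;
  }
  return true;
}

// Usage: CheckChain(&checker, obj, obj, 42u, &Checker::MayIndexed);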
- if (current == holder) {
- return true;
- }
- current = JSObject::cast(current->GetPrototype());
+static bool CheckElementAccess(
+ JSObject* obj,
+ uint32_t index,
+ v8::AccessType access_type) {
+ // TODO(1095): we should traverse the hidden prototype hierarchy as well.
+ if (CheckGenericAccess(
+ obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
+ return true;
}
+ obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ return false;
+}
+
+
+static bool CheckPropertyAccess(
+ JSObject* obj,
+ String* name,
+ v8::AccessType access_type) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ return CheckElementAccess(obj, index, access_type);
+ }
+
+ LookupResult lookup(obj->GetIsolate());
+ obj->LocalLookup(name, &lookup);
+
+ if (CheckGenericAccess<Object*>(
+ obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) {
+ return true;
+ }
+
+ // Access check callback denied the access, but some properties
+ // can have special permissions which override the callback's decision
+ // (currently see v8::AccessControl).
// API callbacks can have per callback access exceptions.
- switch (result->type()) {
- case CALLBACKS: {
- if (CheckAccessException(result, access_type)) {
+ switch (lookup.type()) {
+ case CALLBACKS:
+ if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
return true;
}
break;
- }
- case INTERCEPTOR: {
+ case INTERCEPTOR:
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- holder->LookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (CheckAccessException(result, access_type)) {
+ lookup.holder()->LookupRealNamedProperty(name, &lookup);
+ if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
+ if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
return true;
}
}
break;
- }
default:
break;
}
- isolate->ReportFailedAccessCheck(current, access_type);
+ obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
return false;
}
-// TODO(1095): we should traverse hidden prototype hierachy as well.
-static bool CheckElementAccess(JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- if (obj->IsAccessCheckNeeded() &&
- !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
- return false;
- }
-
- return true;
-}
-
-
// Enumerator used as indices into the array returned from GetOwnProperty
enum PropertyDescriptorIndices {
IS_ACCESSOR_INDEX,
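The rewritten check above folds the named and indexed cases into a single prototype-chain walk, CheckGenericAccess, parameterized by the key type and by an Isolate member-function pointer. A minimal standalone sketch of that pattern, with toy Obj/Ctx types standing in for JSObject/Isolate (not V8's real API):

#include <cassert>

// Toy stand-ins for JSObject and Isolate; illustrative only.
struct Obj {
  Obj* prototype;
};

struct Ctx {
  bool MayNamedAccess(Obj* obj, const char* name) { return true; }
  bool MayIndexedAccess(Obj* obj, unsigned index) { return true; }
};

// One walk serves both key types: the access predicate is passed in as a
// pointer to a Ctx member function. The holder is assumed to be on the chain.
template <class Key>
static bool CheckGenericAccess(Obj* receiver, Obj* holder, Key key, Ctx* ctx,
                               bool (Ctx::*may_access)(Obj*, Key)) {
  for (Obj* current = receiver; ; current = current->prototype) {
    if (!(ctx->*may_access)(current, key)) return false;
    if (current == holder) break;
  }
  return true;
}

int main() {
  Obj holder = {nullptr};
  Obj receiver = {&holder};
  Ctx ctx;
  assert(CheckGenericAccess<const char*>(&receiver, &holder, "x", &ctx,
                                         &Ctx::MayNamedAccess));
  assert(CheckGenericAccess<unsigned>(&receiver, &holder, 0u, &ctx,
                                      &Ctx::MayIndexedAccess));
}

The member-function-pointer parameter is what lets MayNamedAccess and MayIndexedAccess share the loop without introducing a virtual interface.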
@@ -1085,141 +1097,33 @@
Handle<JSObject> obj,
Handle<String> name) {
Heap* heap = isolate->heap();
+ PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
+ if (attrs == ABSENT) return heap->undefined_value();
+ AccessorPair* accessors = obj->GetLocalPropertyAccessorPair(*name);
+
Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
- Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
- LookupResult result(isolate);
- // This could be an element.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- switch (obj->GetLocalElementType(index)) {
- case JSObject::UNDEFINED_ELEMENT:
- return heap->undefined_value();
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
+ elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(accessors != NULL));
- case JSObject::STRING_CHARACTER_ELEMENT: {
- // Special handling of string objects according to ECMAScript 5
- // 15.5.5.2. Note that this might be a string object with elements
- // other than the actual string value. This is covered by the
- // subsequent cases.
- Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
- Handle<String> str(String::cast(js_value->value()));
- Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
-
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(VALUE_INDEX, *substr);
- elms->set(WRITABLE_INDEX, heap->false_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->false_value());
- return *desc;
- }
-
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT: {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = Object::GetElement(obj, index);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->true_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->true_value());
- return *desc;
- }
-
- case JSObject::DICTIONARY_ELEMENT: {
- Handle<JSObject> holder = obj;
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return heap->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- holder = Handle<JSObject>(JSObject::cast(proto));
- }
- FixedArray* elements = FixedArray::cast(holder->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == heap->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != SeededNumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- switch (details.type()) {
- case CALLBACKS: {
- // This is an accessor property with getter and/or setter.
- AccessorPair* accessors =
- AccessorPair::cast(dictionary->ValueAt(entry));
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
- if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
- }
- if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
- }
- break;
- }
- case NORMAL: {
- // This is a data property.
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = Object::GetElement(obj, index);
- ASSERT(!value.is_null());
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
- return *desc;
- }
- }
- }
-
- // Use recursive implementation to also traverse hidden prototypes
- GetOwnPropertyImplementation(*obj, *name, &result);
-
- if (!result.IsProperty()) {
- return heap->undefined_value();
- }
-
- if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
- return heap->false_value();
- }
-
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
-
- bool is_js_accessor = result.IsPropertyCallbacks() &&
- (result.GetCallbackObject()->IsAccessorPair());
-
- if (is_js_accessor) {
- // __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
-
- AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
+ if (accessors == NULL) {
+ elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
+ // GetProperty performs the access check.
+ elms->set(VALUE_INDEX, *GetProperty(obj, name));
+ } else {
+ // Access checks are performed on the getter and setter separately.
+ // When a check fails, the corresponding field is left unset in the descriptor.
Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+ Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
+ if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
elms->set(GETTER_INDEX, getter);
}
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+ if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
elms->set(SETTER_INDEX, setter);
}
- } else {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
-
- PropertyAttributes attrs;
- Object* value;
- // GetProperty will check access and report any violations.
- { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- elms->set(VALUE_INDEX, value);
}
- return *desc;
+ return *isolate->factory()->NewJSArrayWithElements(elms);
}
@@ -2515,7 +2419,7 @@
Handle<String> joined_string;
if (is_ascii_) {
- Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
+ Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
AssertNoAllocation no_alloc;
char* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -2546,8 +2450,8 @@
}
private:
- Handle<SeqAsciiString> NewRawAsciiString(int length) {
- return heap_->isolate()->factory()->NewRawAsciiString(length);
+ Handle<SeqOneByteString> NewRawOneByteString(int length) {
+ return heap_->isolate()->factory()->NewRawOneByteString(length);
}
@@ -2996,7 +2900,7 @@
Handle<ResultSeqString> result;
if (ResultSeqString::kHasAsciiEncoding) {
result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(result_len));
+ isolate->factory()->NewRawOneByteString(result_len));
} else {
result = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(result_len));
@@ -3065,7 +2969,7 @@
regexp->TypeTag() == JSRegExp::ATOM &&
simple_replace) {
if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+ return StringReplaceAtomRegExpWithString<SeqOneByteString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
@@ -3153,9 +3057,9 @@
// Shortcut for simple non-regexp global replacements
if (is_global &&
regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string(HEAP->empty_string());
+ Handle<String> empty_string = isolate->factory()->empty_string();
if (subject->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+ return StringReplaceAtomRegExpWithString<SeqOneByteString>(
isolate,
subject,
regexp,
@@ -3191,7 +3095,7 @@
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(new_length));
+ isolate->factory()->NewRawOneByteString(new_length));
} else {
answer = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(new_length));
@@ -3283,7 +3187,7 @@
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
- return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
+ return StringReplaceRegExpWithEmptyString<SeqOneByteString>(
isolate, subject, regexp, last_match_info);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
@@ -4102,8 +4006,7 @@
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastElementsKind(elements_kind) &&
- !IsFastObjectElementsKind(elements_kind)) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
if (IsFastHoleyElementsKind(elements_kind)) {
@@ -4116,6 +4019,9 @@
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ !IsFastElementsKind(elements_kind));
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
@@ -4725,41 +4631,6 @@
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_ARG_CHECKED(String, key, 1);
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- JSObject::LocalElementType type = object->GetLocalElementType(index);
- switch (type) {
- case JSObject::UNDEFINED_ELEMENT:
- case JSObject::STRING_CHARACTER_ELEMENT:
- return isolate->heap()->false_value();
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT:
- return isolate->heap()->true_value();
- case JSObject::DICTIONARY_ELEMENT: {
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- if (proto->IsNull()) {
- return isolate->heap()->false_value();
- }
- ASSERT(proto->IsJSGlobalObject());
- object = JSObject::cast(proto);
- }
- FixedArray* elements = FixedArray::cast(object->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != SeededNumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- return isolate->heap()->ToBoolean(!details.IsDontEnum());
- }
- }
- }
-
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
}
@@ -5159,10 +5030,10 @@
// Fast case: short integer or some sorts of junk values.
int len = subject->length();
- if (subject->IsSeqAsciiString()) {
+ if (subject->IsSeqOneByteString()) {
if (len == 0) return Smi::FromInt(0);
- char const* data = SeqAsciiString::cast(subject)->GetChars();
+ char const* data = SeqOneByteString::cast(subject)->GetChars();
bool minus = (data[0] == '-');
int start_pos = (minus ? 1 : 0);
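The fast case reads the one-byte character data directly and bails to the general conversion on anything but a plain decimal. The same shortcut, hand-rolled (a sketch, not V8's InternalStringToInt):

#include <cassert>

// Parse a decimal integer from raw one-byte data.
// Empty input yields 0; any non-digit bails out to the slow path.
static bool FastParseInt(const char* data, int len, int* out) {
  if (len == 0) { *out = 0; return true; }
  bool minus = (data[0] == '-');
  int pos = minus ? 1 : 0;
  int value = 0;
  for (; pos < len; pos++) {
    if (data[pos] < '0' || data[pos] > '9') return false;
    value = value * 10 + (data[pos] - '0');
  }
  *out = minus ? -value : value;
  return true;
}

int main() {
  int v = 0;
  assert(FastParseInt("-42", 3, &v) && v == -42);
  assert(FastParseInt("", 0, &v) && v == 0);
  assert(!FastParseInt("4x", 2, &v));  // junk: fall back to the slow path
}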
@@ -5529,7 +5400,7 @@
template <>
-MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) {
+MaybeObject* AllocateRawString<SeqOneByteString>(Isolate* isolate, int length) {
return isolate->heap()->AllocateRawAsciiString(length);
}
@@ -5681,7 +5552,7 @@
return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
flat.ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqAsciiString, false>(isolate,
+ return QuoteJsonString<char, SeqOneByteString, false>(isolate,
flat.ToAsciiVector());
}
}
@@ -5704,7 +5575,7 @@
return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
flat.ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqAsciiString, true>(isolate,
+ return QuoteJsonString<char, SeqOneByteString, true>(isolate,
flat.ToAsciiVector());
}
}
@@ -5796,7 +5667,7 @@
}
if (ascii) {
- return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
+ return QuoteJsonStringArray<char, SeqOneByteString>(isolate,
elements,
worst_case_length);
} else {
@@ -6093,14 +5964,14 @@
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqAsciiString()) {
+ if (s->IsSeqOneByteString()) {
Object* o;
{ MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
- SeqAsciiString* result = SeqAsciiString::cast(o);
+ SeqOneByteString* result = SeqOneByteString::cast(o);
bool has_changed_character = ConvertTraits::AsciiConverter::Convert(
- result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
+ result->GetChars(), SeqOneByteString::cast(s)->GetChars(), length);
return has_changed_character ? result : s;
}
@@ -6678,7 +6549,7 @@
isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
- SeqAsciiString* answer = SeqAsciiString::cast(object);
+ SeqOneByteString* answer = SeqOneByteString::cast(object);
StringBuilderConcatHelper(special,
answer->GetChars(),
fixed_array,
@@ -6840,7 +6711,7 @@
bool is_ascii = separator->IsAsciiRepresentation();
int max_string_length;
if (is_ascii) {
- max_string_length = SeqAsciiString::kMaxLength;
+ max_string_length = SeqOneByteString::kMaxLength;
} else {
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6892,8 +6763,8 @@
MaybeObject* result_allocation =
isolate->heap()->AllocateRawAsciiString(string_length);
if (result_allocation->IsFailure()) return result_allocation;
- SeqAsciiString* result_string =
- SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
+ SeqOneByteString* result_string =
+ SeqOneByteString::cast(result_allocation->ToObjectUnchecked());
JoinSparseArrayWithSeparator<char>(elements,
elements_length,
array_length,
@@ -7988,7 +7859,34 @@
HandleScope handle_scope(isolate);
ASSERT(FLAG_parallel_recompilation);
Compiler::RecompileParallel(args.at<JSFunction>(0));
- return *isolate->factory()->undefined_value();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ForceParallelRecompile) {
+ if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ return isolate->Throw(
+ *isolate->factory()->LookupAsciiSymbol("Recompile queue is full."));
+ }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
+ Compiler::RecompileParallel(fun);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
+ if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
+ Handle<SharedFunctionInfo> shared(fun->shared());
+ while (*opt_thread->InstallNextOptimizedFunction() != *shared) { }
+ return isolate->heap()->undefined_value();
}
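Runtime_InstallRecompiledCode spins, installing finished functions from the optimizing compiler thread's output queue until the one it was asked about comes through. The control flow, modeled with a plain queue (InstallNext and the function names are hypothetical stand-ins):

#include <cassert>
#include <queue>
#include <string>

// The background compiler enqueues finished functions; the caller installs
// them in order until its target appears.
static std::string InstallNext(std::queue<std::string>* done) {
  std::string fn = done->front();
  done->pop();
  // (The real runtime swaps the optimized Code object into the function here.)
  return fn;
}

int main() {
  std::queue<std::string> done;
  done.push("f");
  done.push("g");
  done.push("target");
  // Same shape as: while (*InstallNextOptimizedFunction() != *shared) { }
  while (InstallNext(&done) != "target") { }
  assert(done.empty());
}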
@@ -9064,7 +8962,7 @@
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
- if (source->IsSeqAsciiString()) {
+ if (source->IsSeqOneByteString()) {
result = JsonParser<true>::Parse(source, zone);
} else {
result = JsonParser<false>::Parse(source, zone);
@@ -9303,7 +9201,7 @@
clear_storage();
set_storage(*result);
}
-}
+ }
void increase_index_offset(uint32_t delta) {
if (JSObject::kMaxElementCount - index_offset_ < delta) {
@@ -9394,10 +9292,22 @@
break;
}
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ // Fast elements can only have lengths that are representable by
+ // a 32-bit signed integer.
+ ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ if (array->elements()->IsFixedArray()) {
+ ASSERT(FixedArray::cast(array->elements())->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()));
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->is_the_hole(i)) element_count++;
+ }
break;
+ }
case DICTIONARY_ELEMENTS: {
Handle<SeededNumberDictionary> dictionary(
SeededNumberDictionary::cast(array->elements()));
@@ -9640,8 +9550,27 @@
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ // Run through the elements FixedDoubleArray and use HasElement and
+ // GetElement to check the prototype for missing elements.
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(receiver->elements()));
+ int fast_length = static_cast<int>(length);
+ ASSERT(fast_length <= elements->length());
+ for (int j = 0; j < fast_length; j++) {
+ HandleScope loop_scope(isolate);
+ if (!elements->is_the_hole(j)) {
+ double double_value = elements->get_scalar(j);
+ Handle<Object> element_value =
+ isolate->factory()->NewNumber(double_value);
+ visitor->visit(j, element_value);
+ } else if (receiver->HasElement(j)) {
+ // Call GetElement on receiver, not its prototype, or getters won't
+ // have the correct receiver.
+ Handle<Object> element_value = Object::GetElement(receiver, j);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
+ visitor->visit(j, element_value);
+ }
+ }
break;
}
case DICTIONARY_ELEMENTS: {
@@ -9744,48 +9673,51 @@
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
+ ElementsKind kind = FAST_SMI_ELEMENTS;
+
uint32_t estimate_result_length = 0;
uint32_t estimate_nof_elements = 0;
- {
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope;
- Handle<Object> obj(elements->get(i));
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- // TODO(1810): Find out if it's worthwhile to properly support
- // arbitrary ElementsKinds. For now, pessimistically transition to
- // FAST_*_ELEMENTS.
- if (array->HasFastDoubleElements()) {
- ElementsKind to_kind = FAST_ELEMENTS;
- if (array->HasFastHoleyElements()) {
- to_kind = FAST_HOLEY_ELEMENTS;
- }
- array = Handle<JSArray>::cast(
- JSObject::TransitionElementsKind(array, to_kind));
+ for (int i = 0; i < argument_count; i++) {
+ HandleScope loop_scope;
+ Handle<Object> obj(elements->get(i));
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ length_estimate = static_cast<uint32_t>(array->length()->Number());
+ if (length_estimate != 0) {
+ ElementsKind array_kind =
+ GetPackedElementsKind(array->map()->elements_kind());
+ if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
+ kind = array_kind;
}
- length_estimate =
- static_cast<uint32_t>(array->length()->Number());
- element_estimate =
- EstimateElementCount(array);
- } else {
- length_estimate = 1;
- element_estimate = 1;
}
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
+ element_estimate = EstimateElementCount(array);
+ } else {
+ if (obj->IsHeapObject()) {
+ if (obj->IsNumber()) {
+ if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
+ kind = FAST_DOUBLE_ELEMENTS;
+ }
+ } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
+ kind = FAST_ELEMENTS;
+ }
}
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
- }
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length <
+ length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements <
+ element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
+ } else {
+ estimate_nof_elements += element_estimate;
}
}
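Both estimates are accumulated with a saturating add, so repeated concat arguments can cap at kMaxElementCount but never wrap around. The idiom in isolation (the constant is a stand-in for JSObject::kMaxElementCount):

#include <cassert>
#include <cstdint>

// Saturating add: clamp at max instead of wrapping on unsigned overflow.
// max - total never underflows because total is kept <= max.
static uint32_t CappedAdd(uint32_t total, uint32_t delta, uint32_t max) {
  return (max - total < delta) ? max : total + delta;
}

int main() {
  const uint32_t kMax = 0xFFFFFFFFu;  // stand-in for kMaxElementCount
  assert(CappedAdd(10, 5, kMax) == 15);
  assert(CappedAdd(kMax - 1, 7, kMax) == kMax);  // clamped, no wraparound
}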
@@ -9796,8 +9728,76 @@
Handle<FixedArray> storage;
if (fast_case) {
- // The backing storage array must have non-existing elements to
- // preserve holes across concat operations.
+ if (kind == FAST_DOUBLE_ELEMENTS) {
+ Handle<FixedDoubleArray> double_storage =
+ isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+ int j = 0;
+ bool failure = false;
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj(elements->get(i));
+ if (obj->IsSmi()) {
+ double_storage->set(j, Smi::cast(*obj)->value());
+ j++;
+ } else if (obj->IsNumber()) {
+ double_storage->set(j, obj->Number());
+ j++;
+ } else {
+ JSArray* array = JSArray::cast(*obj);
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ switch (array->map()->elements_kind()) {
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty fixed array indicates that there are no elements.
+ if (array->elements()->IsFixedArray()) break;
+ FixedDoubleArray* elements =
+ FixedDoubleArray::cast(array->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (elements->is_the_hole(i)) {
+ failure = true;
+ break;
+ }
+ double double_value = elements->get_scalar(i);
+ double_storage->set(j, double_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* elements(
+ FixedArray::cast(array->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ Object* element = elements->get(i);
+ if (element->IsTheHole()) {
+ failure = true;
+ break;
+ }
+ int32_t int_value = Smi::cast(element)->value();
+ double_storage->set(j, int_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ ASSERT_EQ(0, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ if (failure) break;
+ }
+ Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+ Smi* length = Smi::FromInt(j);
+ Handle<Map> map;
+ map = isolate->factory()->GetElementsTransitionMap(array, kind);
+ array->set_map(*map);
+ array->set_length(length);
+ array->set_elements(*double_storage);
+ return *array;
+ }
+ // The backing storage array must have non-existing elements to preserve
+ // holes across concat operations.
storage = isolate->factory()->NewFixedArrayWithHoles(
estimate_result_length);
} else {
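The new double fast path above copies smis and unboxed doubles straight into a FixedDoubleArray and abandons the fast path the moment it meets a hole. A simplified model of that copy loop, with std::optional standing in for the hole encoding:

#include <cassert>
#include <optional>
#include <vector>

// Copy elements into a flat double store; any hole aborts the fast path.
static bool CopyIntoDoubleStorage(
    const std::vector<std::optional<double>>& src,
    std::vector<double>* storage) {
  for (const std::optional<double>& e : src) {
    if (!e.has_value()) return false;  // hole: fall back to generic concat
    storage->push_back(*e);
  }
  return true;
}

int main() {
  std::vector<double> out;
  assert(CopyIntoDoubleStorage({1.0, 2.5}, &out) && out.size() == 2);
  out.clear();
  assert(!CopyIntoDoubleStorage({1.0, std::nullopt}, &out));
}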
diff --git a/src/runtime.h b/src/runtime.h
index caed34b..146b8dc 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -85,7 +85,9 @@
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
+ F(ParallelRecompile, 1, 1) \
+ F(ForceParallelRecompile, 1, 1) \
+ F(InstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
diff --git a/src/spaces.cc b/src/spaces.cc
index 583b2ca..0ac23d2 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -448,6 +448,7 @@
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+ chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
@@ -2784,7 +2785,8 @@
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+ Page::FromAddress(object->address())->ResetProgressBar();
+ Page::FromAddress(object->address())->ResetLiveBytes();
previous = current;
current = current->next_page();
} else {
diff --git a/src/spaces.h b/src/spaces.h
index 9121e9c..4fbabd6 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -397,6 +397,12 @@
WAS_SWEPT_PRECISELY,
WAS_SWEPT_CONSERVATIVELY,
+ // Large objects can have a progress bar in their page header. These
+ // objects are scanned in increments and are kept black while being
+ // scanned. Even if the mutator writes to them, they are kept black
+ // and a white-to-grey transition is performed on the value.
+ HAS_PROGRESS_BAR,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -480,6 +486,23 @@
write_barrier_counter_ = counter;
}
+ int progress_bar() {
+ ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
+ return progress_bar_;
+ }
+
+ void set_progress_bar(int progress_bar) {
+ ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
+ progress_bar_ = progress_bar;
+ }
+
+ void ResetProgressBar() {
+ if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ set_progress_bar(0);
+ ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ }
+
static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
@@ -505,7 +528,7 @@
kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kHeaderSize =
- kWriteBarrierCounterOffset + kPointerSize + kPointerSize;
+ kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -649,6 +672,9 @@
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
intptr_t write_barrier_counter_;
+ // Used by the incremental marker to keep track of the scanning progress in
+ // large objects that have a progress bar and are scanned in increments.
+ int progress_bar_;
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
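The progress_bar_ field added above lets the incremental marker scan a large object across several marking steps, remembering how far the previous step got. The bookkeeping, reduced to its essentials (ScanIncrement is a hypothetical name; the real marker visits pointers in the scanned range):

#include <algorithm>
#include <cassert>

struct Chunk {
  int size;
  int progress_bar = 0;  // bytes scanned so far

  // Scan at most `budget` more bytes; true once the whole object is done.
  bool ScanIncrement(int budget) {
    int end = std::min(size, progress_bar + budget);
    // (Visit pointers in [progress_bar, end) here.)
    progress_bar = end;
    return progress_bar == size;
  }
};

int main() {
  Chunk c{1000};
  assert(!c.ScanIncrement(400));  // 400 bytes scanned
  assert(!c.ScanIncrement(400));  // 800 bytes scanned
  assert(c.ScanIncrement(400));   // clamped at 1000: finished
}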
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index fcb8a4e..1f708b3 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -120,7 +120,8 @@
// name specific if there are global objects involved.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -139,7 +140,8 @@
PropertyIndex field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -160,7 +162,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -180,7 +183,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -200,7 +204,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -219,7 +224,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -245,7 +251,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -265,7 +272,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -285,7 +293,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -304,7 +313,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -324,7 +334,8 @@
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -341,7 +352,8 @@
Handle<JSArray> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -358,7 +370,7 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Map> map(receiver->map());
- Handle<Object> probe(map->FindInCodeCache(*name, flags));
+ Handle<Object> probe(map->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -375,7 +387,8 @@
Handle<JSFunction> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -396,7 +409,8 @@
(transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -438,7 +452,7 @@
UNREACHABLE();
break;
}
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
Handle<Code> code;
@@ -490,7 +504,8 @@
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::NORMAL, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -510,7 +525,8 @@
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -530,7 +546,8 @@
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -548,7 +565,8 @@
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -568,7 +586,8 @@
(transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedStoreStubCompiler compiler(isolate(), strict_mode,
@@ -610,7 +629,8 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
@@ -648,7 +668,8 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
@@ -685,7 +706,8 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
@@ -715,7 +737,8 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
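Every probe site in stub-cache.cc now hands the isolate to the Handle constructor instead of having it looked up implicitly. The shape of that change, with toy types (not V8's real Handle):

#include <cassert>

struct Isolate {  // toy stand-in
  int slot = 0;
  int* Allocate() { return &slot; }
};

class Handle {  // toy stand-in: the isolate owns the storage
 public:
  Handle(int value, Isolate* isolate) : location_(isolate->Allocate()) {
    *location_ = value;
  }
  int operator*() const { return *location_; }

 private:
  int* location_;
};

int main() {
  Isolate isolate;
  // Before: Handle probe(42);            // isolate found implicitly
  // After:  Handle probe(42, &isolate);  // allocation context explicit
  Handle probe(42, &isolate);
  assert(*probe == 42);
}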
diff --git a/src/type-info.cc b/src/type-info.cc
index bc6a46b..7a9a5de 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -79,7 +79,7 @@
Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry))
+ ? Handle<Object>(dictionary_->ValueAt(entry), isolate_)
: Handle<Object>::cast(isolate_->factory()->undefined_value());
}
@@ -312,43 +312,53 @@
}
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+static TypeInfo TypeFromCompareType(CompareIC::State state) {
switch (state) {
case CompareIC::UNINITIALIZED:
// Uninitialized means never executed.
return TypeInfo::Uninitialized();
- case CompareIC::SMIS:
+ case CompareIC::SMI:
return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBERS:
+ case CompareIC::HEAP_NUMBER:
return TypeInfo::Number();
- case CompareIC::SYMBOLS:
- case CompareIC::STRINGS:
+ case CompareIC::SYMBOL:
+ return TypeInfo::Symbol();
+ case CompareIC::STRING:
return TypeInfo::String();
- case CompareIC::OBJECTS:
+ case CompareIC::OBJECT:
case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
default:
- return unknown;
+ return TypeInfo::Unknown();
}
}
-bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+void TypeFeedbackOracle::CompareType(CompareOperation* expr,
+ TypeInfo* left_type,
+ TypeInfo* right_type,
+ TypeInfo* overall_type) {
Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (!object->IsCode()) return false;
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) {
+ *left_type = *right_type = *overall_type = unknown;
+ return;
+ }
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return false;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- return state == CompareIC::SYMBOLS;
+ if (!code->is_compare_ic_stub()) {
+ *left_type = *right_type = *overall_type = unknown;
+ return;
+ }
+
+ int stub_minor_key = code->stub_info();
+ CompareIC::State left_state, right_state, handler_state;
+ ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
+ &handler_state, NULL);
+ *left_type = TypeFromCompareType(left_state);
+ *right_type = TypeFromCompareType(right_state);
+ *overall_type = TypeFromCompareType(handler_state);
}
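DecodeMinorKey above recovers separate left/right/handler states that the compare stub packed into its minor key, replacing the single compare_state byte. The packing idea as a generic bitfield sketch (field widths here are illustrative, not V8's actual layout):

#include <cassert>

enum State { UNINITIALIZED, SMI, HEAP_NUMBER, STRING, GENERIC };

// Pack three small states into one integer key, four bits each.
static int EncodeMinorKey(State left, State right, State handler) {
  return left | (right << 4) | (handler << 8);
}

static void DecodeMinorKey(int key, State* left, State* right,
                           State* handler) {
  *left = static_cast<State>(key & 0xF);
  *right = static_cast<State>((key >> 4) & 0xF);
  *handler = static_cast<State>((key >> 8) & 0xF);
}

int main() {
  State l, r, h;
  DecodeMinorKey(EncodeMinorKey(SMI, HEAP_NUMBER, GENERIC), &l, &r, &h);
  assert(l == SMI && r == HEAP_NUMBER && h == GENERIC);
}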
@@ -357,7 +367,7 @@
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
if (state != CompareIC::KNOWN_OBJECTS) {
return Handle<Map>::null();
}
@@ -388,55 +398,44 @@
}
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
+static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
+ switch (binary_type) {
+ // Uninitialized means never executed.
+ case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized();
+ case BinaryOpIC::SMI: return TypeInfo::Smi();
+ case BinaryOpIC::INT32: return TypeInfo::Integer32();
+ case BinaryOpIC::HEAP_NUMBER: return TypeInfo::Double();
+ case BinaryOpIC::ODDBALL: return TypeInfo::Unknown();
+ case BinaryOpIC::STRING: return TypeInfo::String();
+ case BinaryOpIC::GENERIC: return TypeInfo::Unknown();
+ }
+ UNREACHABLE();
+ return TypeInfo::Unknown();
+}
+
+
+void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
+ TypeInfo* left,
+ TypeInfo* right,
+ TypeInfo* result) {
Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
+ if (!object->IsCode()) {
+ *left = *right = *result = unknown;
+ return;
+ }
Handle<Code> code = Handle<Code>::cast(object);
if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_result_type());
-
- switch (type) {
- case BinaryOpIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI:
- switch (result_type) {
- case BinaryOpIC::UNINITIALIZED:
- if (expr->op() == Token::DIV) {
- return TypeInfo::Double();
- }
- return TypeInfo::Smi();
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- case BinaryOpIC::INT32:
- if (expr->op() == Token::DIV ||
- result_type == BinaryOpIC::HEAP_NUMBER) {
- return TypeInfo::Double();
- }
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
- return TypeInfo::String();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
+ BinaryOpIC::TypeInfo left_type, right_type, result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &result_type);
+ *left = TypeFromBinaryOpType(left_type);
+ *right = TypeFromBinaryOpType(right_type);
+ *result = TypeFromBinaryOpType(result_type);
+ return;
}
- return unknown;
+ // Not a binary op stub.
+ *left = *right = *result = unknown;
}
@@ -447,28 +446,8 @@
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return unknown;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::STRINGS:
- return TypeInfo::String();
- case CompareIC::SYMBOLS:
- return TypeInfo::Symbol();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::OBJECTS:
- case CompareIC::KNOWN_OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
- }
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ return TypeFromCompareType(state);
}
@@ -479,9 +458,14 @@
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- switch (type) {
+ BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &unused_result_type);
+ // CountOperations should always have +1 or -1 as their right input.
+ ASSERT(right_type == BinaryOpIC::SMI ||
+ right_type == BinaryOpIC::UNINITIALIZED);
+
+ switch (left_type) {
case BinaryOpIC::UNINITIALIZED:
case BinaryOpIC::SMI:
return TypeInfo::Smi();
@@ -489,7 +473,6 @@
return TypeInfo::Integer32();
case BinaryOpIC::HEAP_NUMBER:
return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
case BinaryOpIC::STRING:
case BinaryOpIC::GENERIC:
return unknown;
diff --git a/src/type-info.h b/src/type-info.h
index 00d88c2..8b2ec49 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -204,6 +204,7 @@
kNonPrimitive = 0x40, // 1000000
kUninitialized = 0x7f // 1111111
};
+
explicit inline TypeInfo(Type t) : type_(t) { }
Type type_;
@@ -287,9 +288,14 @@
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
- TypeInfo BinaryType(BinaryOperation* expr);
- TypeInfo CompareType(CompareOperation* expr);
- bool IsSymbolCompare(CompareOperation* expr);
+ void BinaryType(BinaryOperation* expr,
+ TypeInfo* left,
+ TypeInfo* right,
+ TypeInfo* result);
+ void CompareType(CompareOperation* expr,
+ TypeInfo* left_type,
+ TypeInfo* right_type,
+ TypeInfo* overall_type);
Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
diff --git a/src/v8conversions.cc b/src/v8conversions.cc
index bf175e5..c6755d5 100644
--- a/src/v8conversions.cc
+++ b/src/v8conversions.cc
@@ -84,7 +84,7 @@
String* str, int flags, double empty_string_val) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
+ const char* begin = SeqOneByteString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
@@ -109,7 +109,7 @@
int radix) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
+ const char* begin = SeqOneByteString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
diff --git a/src/v8natives.js b/src/v8natives.js
index 20fc74d..112c2c5 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -970,7 +970,7 @@
}
var n = ToUint32(obj.length);
var array = new $Array(n);
- var names = {}; // TODO(rossberg): use sets once they are ready.
+ var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
for (var index = 0; index < n; index++) {
var s = ToString(obj[index]);
if (%HasLocalProperty(names, s)) {
@@ -1025,7 +1025,7 @@
}
// Property names are expected to be unique strings.
- var propertySet = {};
+ var propertySet = { __proto__: null };
var j = 0;
for (var i = 0; i < propertyNames.length; ++i) {
var name = ToString(propertyNames[i]);
@@ -1066,7 +1066,7 @@
// Clone the attributes object for protection.
// TODO(rossberg): not spec'ed yet, so not sure if this should involve
// non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = {};
+ var attributesClone = { __proto__: null };
for (var a in attributes) {
attributesClone[a] = attributes[a];
}
diff --git a/src/variables.h b/src/variables.h
index ba26b80..bb35ee8 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -130,8 +130,8 @@
bool is_arguments() const { return kind_ == ARGUMENTS; }
// True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval() const {
- return IsVariable(FACTORY->eval_symbol());
+ bool is_possibly_eval(Isolate* isolate) const {
+ return IsVariable(isolate->factory()->eval_symbol());
}
Variable* local_if_not_shadowed() const {
diff --git a/src/version.cc b/src/version.cc
index b61775b..7bf2825 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 15
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index a8e52e9..a7fc1ed 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -637,6 +637,10 @@
class FloatingPointHelper : public AllStatic {
public:
+ enum ConvertUndefined {
+ CONVERT_UNDEFINED_TO_ZERO,
+ BAILOUT_ON_UNDEFINED
+ };
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
@@ -672,7 +676,8 @@
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis);
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined);
};
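CONVERT_UNDEFINED_TO_ZERO exists because ToInt32(undefined) is 0: the bitwise and shift stubs may silently treat an undefined operand as zero, while the arithmetic stubs must bail out and transition. The policy in miniature (a sketch, not the stub's actual conversion path):

#include <cassert>

enum ConvertUndefined { CONVERT_UNDEFINED_TO_ZERO, BAILOUT_ON_UNDEFINED };

// Returns false when the operand forces a bailout to the generic path.
static bool ToInt32Operand(bool is_undefined, int raw, ConvertUndefined mode,
                           int* out) {
  if (is_undefined) {
    if (mode == BAILOUT_ON_UNDEFINED) return false;
    *out = 0;  // ToInt32(undefined) == 0, so bitwise ops can proceed
    return true;
  }
  *out = raw;
  return true;
}

int main() {
  int v = -1;
  assert(ToInt32Operand(true, 99, CONVERT_UNDEFINED_TO_ZERO, &v) && v == 0);
  assert(!ToInt32Operand(true, 99, BAILOUT_ON_UNDEFINED, &v));
}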
@@ -997,16 +1002,15 @@
}
+void BinaryOpStub::Initialize() {}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
__ push(rcx); // Push return address.
@@ -1015,69 +1019,16 @@
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- UNREACHABLE();
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// Arguments to BinaryOpStub are in rdx and rax.
const Register left = rdx;
@@ -1086,9 +1037,9 @@
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
- (op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
+ (op == Token::ADD || op == Token::SUB ||
+ op == Token::MUL || op == Token::DIV || op == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
@@ -1096,7 +1047,7 @@
Label use_fp_on_smis;
Label fail;
- if (op_ != Token::BIT_OR) {
+ if (op != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, ¬_smis);
}
@@ -1105,7 +1056,7 @@
__ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
- switch (op_) {
+ switch (op) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
@@ -1177,7 +1128,7 @@
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
@@ -1186,12 +1137,12 @@
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1214,31 +1165,50 @@
// values that could be smi.
__ bind(¬_smis);
Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::ConvertUndefined convert_undefined =
+ FloatingPointHelper::BAILOUT_ON_UNDEFINED;
+ // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
+ }
FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail);
+ &smi_values, &fail, convert_undefined);
__ jmp(&smi_values);
__ bind(&fail);
}
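GenerateSmiCode has become a file-static helper that takes the token as a parameter instead of reading the stub's op_ member, so the emitted logic no longer depends on a particular stub instance. The refactoring pattern in general form (toy Op/Stub, not the real stub interface):

#include <cassert>

enum Op { ADD, SUB };

// After: a free function parameterized by the operation...
static int Generate(int left, int right, Op op) {
  return (op == ADD) ? left + right : left - right;
}

// ...which the member function merely forwards to.
class Stub {
 public:
  explicit Stub(Op op) : op_(op) {}
  int GenerateSmi(int left, int right) { return Generate(left, right, op_); }

 private:
  Op op_;
};

int main() {
  assert(Stub(ADD).GenerateSmi(2, 3) == 5);
  assert(Generate(2, 3, SUB) == -1);  // also callable with no stub instance
}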
-void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
- switch (op_) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
+static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure,
+ Token::Value op,
+ OverwriteMode mode) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, allocation_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, allocation_failure, mode);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
@@ -1259,7 +1229,7 @@
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
@@ -1283,7 +1253,7 @@
// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
@@ -1320,12 +1290,12 @@
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "BinaryStub::GenerateFloatingPointCode.");
+ "BinaryStub_GenerateFloatingPointCode.");
}
}
-void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
@@ -1356,58 +1326,17 @@
}
-void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -1416,24 +1345,22 @@
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- GenerateStringAddCode(masm);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateTypeTransition(masm);
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
+ UNREACHABLE();
}
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1467,7 +1394,7 @@
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
// Convert oddball arguments to numbers.
@@ -1494,39 +1421,79 @@
}
+static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
+ Register input,
+ Label* fail) {
+ Label ok;
+ __ JumpIfSmi(input, &ok, Label::kNear);
+ Register heap_number_map = r8;
+ Register scratch1 = r9;
+ Register scratch2 = r10;
+ // HeapNumbers containing 32-bit integer values are also allowed.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, fail);
+ __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
+ // Convert, convert back, and compare the two doubles' bits.
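+ // A bit difference means the value is not exactly a 32-bit integer
+ // (fractional, out of int32 range, -0, or NaN), so it cannot be a smi.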
+ __ cvttsd2siq(scratch2, xmm0);
+ __ cvtlsi2sd(xmm1, scratch2);
+ __ movq(scratch1, xmm0);
+ __ movq(scratch2, xmm1);
+ __ cmpq(scratch1, scratch2);
+ __ j(not_equal, fail);
+ __ bind(&ok);
+}
+
+
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
- GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
+ }
+
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &gc_required, &not_number, op_, mode_);
__ bind(&not_number);
GenerateTypeTransition(masm);
__ bind(&gc_required);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
@@ -2022,17 +1989,21 @@
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis) {
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined) {
Register heap_number_map = scratch3;
Register smi_result = scratch1;
- Label done;
+ Label done, maybe_undefined_first, maybe_undefined_second, first_done;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Label first_smi;
__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_first
+ : on_not_smis);
// Convert HeapNumber to smi if possible.
__ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2045,11 +2016,15 @@
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(first, smi_result);
+ __ bind(&first_done);
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
__ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_second
+ : on_not_smis);
// Convert second to smi, if possible.
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2062,8 +2037,25 @@
if (on_success != NULL) {
__ jmp(on_success);
} else {
- __ bind(&done);
+ __ jmp(&done);
}
+
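+ // Deferred paths: with CONVERT_UNDEFINED_TO_ZERO, an undefined operand
+ // becomes the smi 0 (ToInt32(undefined) == 0).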
+ __ bind(&maybe_undefined_first);
+ __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(first, first);
+ __ jmp(&first_done);
+
+ __ bind(&maybe_undefined_second);
+ __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(second, second);
+ if (on_success != NULL) {
+ __ jmp(on_success);
+ }
+ // Else: fall through.
+
+ __ bind(&done);
}
@@ -3022,8 +3014,8 @@
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
__ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
@@ -3163,7 +3155,7 @@
}
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -3378,30 +3370,59 @@
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
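+ // Smis are acceptable here; only reject heap objects that are not
+ // heap numbers.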
+ __ JumpIfSmi(input, &ok);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ j(not_equal, fail);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+static void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
+ Condition cc = GetCondition();
Factory* factory = masm->isolate()->factory();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- Label ok;
- __ JumpIfNotSmi(rdx, &ok);
- __ JumpIfNotSmi(rax, &ok);
- __ Abort("CompareStub: smi operands");
- __ bind(&ok);
- }
+ Label miss;
+ CheckInputType(masm, rdx, left_, &miss);
+ CheckInputType(masm, rax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
@@ -3414,66 +3435,58 @@
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc_));
+ __ Set(rax, NegativeComparisonResult(cc));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
}
+ __ Set(rax, EQUAL);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc == greater_equal || cc == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
__ bind(&not_identical);
}
- if (cc_ == equal) { // Both strict and non-strict.
+ if (cc == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
+ if (strict()) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
@@ -3525,40 +3538,38 @@
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
}
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc_ == equal) {
+ if (cc == equal) {
BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
@@ -3574,7 +3585,7 @@
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
@@ -3595,7 +3606,7 @@
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -3635,11 +3646,11 @@
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
// Restore return address on the stack.
@@ -3648,22 +3659,9 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -4421,44 +4419,6 @@
Register InstanceofStub::right() { return no_reg; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -4696,8 +4656,8 @@
&call_runtime);
// Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -4713,11 +4673,11 @@
// rbx - first byte: first character
// rbx - second byte: *maybe* second character
// Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ shll(rcx, Immediate(kBitsPerByte));
__ orl(rbx, rcx);
// Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4765,11 +4725,6 @@
// r9: second instance type.
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
__ jmp(&allocated);
@@ -4799,8 +4754,8 @@
__ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
__ bind(&first_prepared);
// Check whether both strings have same encoding.
@@ -4820,8 +4775,8 @@
__ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
@@ -4837,7 +4792,7 @@
__ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
// rax: result string
// Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rcx: first char of first string
// rbx: first character of result
// r14: length of first string
@@ -5110,7 +5065,7 @@
temp, temp, &next_probe[i]);
// Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ andl(temp, Immediate(0x0000ffff));
__ cmpl(chars, temp);
__ j(equal, &found_in_symbol_table);
@@ -5328,7 +5283,7 @@
__ j(not_zero, &runtime);
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
@@ -5345,10 +5300,10 @@
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -5370,7 +5325,7 @@
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
__ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
@@ -5510,9 +5465,9 @@
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -5568,7 +5523,7 @@
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
@@ -5592,23 +5547,41 @@
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub, Label::kNear);
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rdx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rax, &miss);
+ }
+
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
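+ // Either operand may be a smi; load it as a double inline instead of
+ // falling back to the generic stub.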
+ __ JumpIfSmi(rax, &right_smi, Label::kNear);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
-
- // Load left and right operand
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
+ __ cvtlsi2sd(xmm1, rcx);
+ __ bind(&left);
+ __ JumpIfSmi(rdx, &left_smi, Label::kNear);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
+ __ cvtlsi2sd(xmm0, rcx);
+
+ __ bind(&done);
// Compare operands
__ ucomisd(xmm0, xmm1);
@@ -5624,14 +5597,16 @@
__ ret(0);
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ Cmp(rax, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
+ __ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -5649,7 +5624,7 @@
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -5692,7 +5667,7 @@
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -5778,7 +5753,7 @@
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 6a1a18f..ab8ea76 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -79,13 +79,6 @@
};
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -157,95 +150,6 @@
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure);
- void GenerateStringAddCode(MacroAssembler* masm);
- void GenerateCallRuntimeCode(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index ffccf47..aa777ba 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -571,7 +571,7 @@
__ movzxbl(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
__ bind(&done);
}
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a198739..6bab01b 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -119,7 +119,7 @@
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -2304,7 +2304,7 @@
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -3558,7 +3558,7 @@
__ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
__ AddSmiField(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ incl(index);
__ cmpl(index, array_length);
@@ -3605,7 +3605,7 @@
// Add (separator length times (array_length - 1)) to string_length.
__ SmiToInteger32(scratch,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ decl(index);
__ imull(scratch, index);
__ j(overflow, &bailout);
@@ -3618,10 +3618,10 @@
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3647,7 +3647,7 @@
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ bind(&loop_1_condition);
@@ -3665,7 +3665,7 @@
__ bind(&one_char_separator);
// Get the separator ASCII character value.
// Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ Set(index, 0);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3691,7 +3691,7 @@
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ cmpl(index, array_length_operand);
@@ -3716,7 +3716,7 @@
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movq(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
@@ -3742,7 +3742,7 @@
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
@@ -4060,13 +4060,9 @@
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
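+ // rdx = old value, rax = 1; this operand order serves both INC
+ // (ADD is commutative) and DEC (value - 1).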
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- if (expr->op() == Token::INC) {
- __ Move(rdx, Smi::FromInt(1));
- } else {
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- }
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4285,29 +4281,7 @@
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index efa07a8..641e243 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1729,7 +1729,7 @@
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1740,39 +1740,6 @@
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 2bcf943..13c5fa5 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -236,7 +236,30 @@
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
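+ // For representation changes, note the converted value and one of
+ // the uses it feeds.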
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a874a24..45b1572 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1389,7 +1389,7 @@
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -2152,7 +2152,7 @@
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 79ce968..52fe7aa 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -620,7 +620,7 @@
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 962c2e8..5b85a24 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2201,16 +2201,19 @@
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+ kStringRepresentationMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
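+ // The widened mask overlaps itself when shifted by 3, so pack the two
+ // instance-type bytes with a shift of 8 instead.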
+ shl(scratch1, Immediate(8));
+ orl(scratch1, scratch2);
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 8)));
j(not_equal, on_fail, near_jump);
}
@@ -2246,17 +2249,19 @@
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringMask = kIsNotStringMask | kStringRepresentationMask
+ | kStringEncodingMask | kAsciiDataHintTag;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
+ shl(scratch1, Immediate(8));
+ orl(scratch1, scratch2);
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 8)));
j(not_equal, on_fail, near_jump);
}
@@ -2769,7 +2774,8 @@
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -2788,7 +2794,8 @@
bind(¬_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
jmp(&done);
@@ -2811,7 +2818,8 @@
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
bind(&done);
}
@@ -3957,7 +3965,7 @@
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+ const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
@@ -3968,7 +3976,7 @@
}
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ AllocateInNewSpace(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
result,
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index cc057ac..0d8d6f2 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -895,7 +895,8 @@
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 3495e8d..26a97ab 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1487,7 +1487,7 @@
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1495,7 +1495,7 @@
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1526,6 +1526,34 @@
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+ factory()->fixed_double_array_map());
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into rax and calculate new length.
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+ __ addl(rax, Immediate(argc));
+
+ // Get the elements' length into rcx.
+ __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmpl(rax, rcx);
+ __ j(greater, &call_builtin);
+
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
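+ // rax already holds the new length, so pass argc * kDoubleSize to shift
+ // the store back to the first free element.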
+ __ StoreNumberToDoubleElements(
+ rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+ __ ret((argc + 1) * kPointerSize);
+
__ bind(&with_write_barrier);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
@@ -1537,6 +1565,9 @@
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
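+ // A heap number would need double elements; let the builtin do that
+ // transition rather than converting to fast object elements here.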
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &call_builtin);
// rdx: receiver
// rbx: map