Version 3.14.4
Allow evals for the debugger even if they are prohibited in the debuggee context. (Chromium issue 154733)
Enabled --verify-heap in release mode (issue 2120).
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@12729 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
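
For context on the --verify-heap entry above: heap verification moves from a DEBUG-only build to a dedicated VERIFY_HEAP compile-time guard combined with the runtime flag, and its assertions become CHECKs so they also fire in release builds. A minimal sketch of the guard pattern this patch applies throughout:

    #ifdef VERIFY_HEAP
      if (FLAG_verify_heap) {
        Verify();  // verification uses CHECK, not ASSERT, so it still aborts in release mode
      }
    #endif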
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 7a65ab2..129e62b 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1850,7 +1850,7 @@
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
- vldr(src, operand.rn(), operand.offset(), cond);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
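
The one-line fix above corrects a copy-paste bug: the MemOperand overload of vstr dispatched to vldr, turning every store issued through that overload into a load. A sketch of the corrected overload, assuming the VFP-register/MemOperand signature implied by the context lines:

    void Assembler::vstr(const DwVfpRegister src,
                         const MemOperand& operand,
                         const Condition cond) {
      ASSERT(!operand.rm().is_valid());
      ASSERT(operand.am_ == Offset);
      vstr(src, operand.rn(), operand.offset(), cond);  // was: vldr(...), a copy-paste bug
    }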
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 8b0779b..a6aff15 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -655,11 +655,9 @@
Register scratch1,
Register scratch2,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label is_smi, done;
@@ -716,11 +714,9 @@
Register scratch3,
DwVfpRegister double_scratch,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label done;
Label not_in_int32_range;
@@ -849,11 +845,9 @@
__ b(&done);
__ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
@@ -920,11 +914,9 @@
__ UntagAndJumpIfSmi(dst, object, &done);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
@@ -2545,9 +2537,9 @@
Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
}
Register heap_number_map = r6;
@@ -7065,8 +7057,7 @@
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 8ff3e93..1209372 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -2659,7 +2659,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (generate_debug_code_) __ AbortIfSmi(r0);
+ __ AssertNotSmi(r0);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
@@ -3534,8 +3534,7 @@
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AbortIfNotString(r0);
-
+ __ AssertString(r0);
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index e1ba048..d513083 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2164,12 +2164,10 @@
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index b5a0f9a..d5b67a7 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2295,7 +2295,7 @@
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ AbortIfNotString(input);
+ __ AssertString(input);
__ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2904,14 +2904,9 @@
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
-
- // Bailout index is not a valid argument index. Use unsigned check to get
- // negative check for free.
- __ sub(length, length, index, SetCC);
- DeoptimizeIf(ls, instr->environment());
-
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them; add one more.
+ __ sub(length, length, index);
__ add(length, length, Operand(1));
__ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
}
@@ -4274,9 +4269,7 @@
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(r0);
- }
+ __ AssertSmi(r0);
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -4446,7 +4439,7 @@
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@@ -4461,12 +4454,13 @@
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
+ __ sub(dst, dst, Operand(kHeapObjectTag));
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sub(ip, dst, Operand(kHeapObjectTag));
- __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ __ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4491,13 +4485,16 @@
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first, for performance.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vstr(input_reg, ip, HeapNumber::kValueOffset);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ // Now that we have finished with the object's real address, tag it.
+ __ add(reg, reg, Operand(kHeapObjectTag));
}
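
The DONT_TAG_RESULT path above keeps the heap number's raw (untagged) address live across the VFP store, so no scratch subtraction is needed before vstr; a condensed sketch of the pattern, with the register names taken from the hunk:

    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);             // result is an untagged address
    __ vstr(input_reg, reg, HeapNumber::kValueOffset);  // no sub(..., kHeapObjectTag) required
    __ add(reg, reg, Operand(kHeapObjectTag));          // tag only once the stores are done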
@@ -4510,6 +4507,7 @@
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index b74d09d..4da5387 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1570,7 +1570,11 @@
Register topaddr = scratch1;
Register obj_size_reg = scratch2;
mov(topaddr, Operand(new_space_allocation_top));
- mov(obj_size_reg, Operand(object_size));
+ Operand obj_size_operand = Operand(object_size);
+ if (!obj_size_operand.is_single_instruction(this)) {
+ // We are about to steal ip, so we need to load this value first.
+ mov(obj_size_reg, obj_size_operand);
+ }
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1592,7 +1596,13 @@
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- add(scratch2, result, Operand(obj_size_reg), SetCC);
+ if (obj_size_operand.is_single_instruction(this)) {
+ // We can add the size as an immediate.
+ add(scratch2, result, obj_size_operand, SetCC);
+ } else {
+ // It doesn't fit in an immediate, so we have to use the register.
+ add(scratch2, result, obj_size_reg, SetCC);
+ }
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
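
The intent of the change above is to avoid materializing object_size in a register when it encodes as a single ARM immediate; a simplified sketch of the resulting control flow:

    Operand obj_size_operand = Operand(object_size);
    if (obj_size_operand.is_single_instruction(this)) {
      add(scratch2, result, obj_size_operand, SetCC);  // size fits in one instruction
    } else {
      // Loaded into obj_size_reg earlier, before ip was stolen for the top address.
      add(scratch2, result, obj_size_reg, SetCC);
    }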
@@ -3035,38 +3045,46 @@
}
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(eq, "Operand is not smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(eq, "Operand is not smi");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(lo, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi and not a string");
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(lo, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- CompareRoot(src, root_value_index);
- Assert(eq, message);
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ CompareRoot(src, root_value_index);
+ Check(eq, message);
+ }
}
@@ -3124,7 +3142,8 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required) {
+ Label* gc_required,
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
@@ -3132,11 +3151,16 @@
scratch1,
scratch2,
gc_required,
- TAG_OBJECT);
+ tagging_mode == TAG_RESULT ? TAG_OBJECT :
+ NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
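
Call sites choose between the two tagging modes; a usage sketch (register assignments are illustrative, and the TAG_RESULT default is added in the header hunk below):

    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);                   // TAG_RESULT (the default)
    __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);  // raw address, for direct vstr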
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index c8a6a94..c7032ba 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -68,6 +68,13 @@
SIZE_IN_WORDS = 1 << 2
};
+// Flags used for AllocateHeapNumber.
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag the result.
+ DONT_TAG_RESULT
+};
// Flags used for the ObjectToDoubleVFPRegister function.
enum ObjectToDoubleFlags {
@@ -731,7 +738,8 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
@@ -1201,17 +1209,18 @@
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is a smi (AssertNotSmi) or is not a smi
+ // (AssertSmi), enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 5937e39..d3b5862 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -3787,22 +3787,28 @@
__ Ret();
__ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ add(r0, r5, Operand(kHeapObjectTag));
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ vstr(d0, r5, HeapNumber::kValueOffset);
__ Ret();
} else {
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ mov(r0, r5);
Register dst1 = r1;
Register dst2 = r3;
FloatingPointHelper::Destination dest =
@@ -3838,13 +3844,12 @@
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
@@ -3876,7 +3881,7 @@
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -3893,19 +3898,18 @@
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -3961,18 +3965,17 @@
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
__ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
@@ -4439,7 +4442,7 @@
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
+ heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
diff --git a/src/assembler.cc b/src/assembler.cc
index a58f77f..d81d4ae 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -697,7 +697,7 @@
#endif // ENABLE_DISASSEMBLER
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void RelocInfo::Verify() {
switch (rmode_) {
case EMBEDDED_OBJECT:
@@ -717,12 +717,12 @@
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
- ASSERT(addr != NULL);
+ CHECK(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = HEAP->FindCodeObject(addr);
- ASSERT(found->IsCode());
- ASSERT(code->address() == HeapObject::cast(found)->address());
+ CHECK(found->IsCode());
+ CHECK(code->address() == HeapObject::cast(found)->address());
break;
}
case RUNTIME_ENTRY:
@@ -741,7 +741,7 @@
break;
}
}
-#endif // DEBUG
+#endif // VERIFY_HEAP
// -----------------------------------------------------------------------------
diff --git a/src/assembler.h b/src/assembler.h
index cb5a72d..0bf28ae 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -349,8 +349,7 @@
static const char* RelocModeName(Mode rmode);
void Print(FILE* out);
#endif // ENABLE_DISASSEMBLER
-#ifdef DEBUG
- // Debugging
+#ifdef VERIFY_HEAP
void Verify();
#endif
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 9fb79e7..4b40d92 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1806,7 +1806,7 @@
native_context()->set_regexp_result_map(*initial_map);
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
builtins->Verify();
#endif
diff --git a/src/execution.cc b/src/execution.cc
index 330e41f..89091ba 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -118,7 +118,7 @@
CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
value->Verify();
#endif
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a3e7dfb..02867ba 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -395,6 +395,9 @@
"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
+#ifdef VERIFY_HEAP
+DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+#endif
// v8.cc
DEFINE_bool(use_idle_notification, true,
@@ -568,7 +571,6 @@
DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
DEFINE_bool(code_stats, false, "report code statistics after GC")
-DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(verify_native_context_separation, false,
"verify that code holds on to at most one native context after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 13be88a..876d8d8 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -640,9 +640,11 @@
void ExternalStringTable::ShrinkNewStrings(int position) {
new_space_strings_.Rewind(position);
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
}
@@ -741,17 +743,15 @@
}
-#ifdef DEBUG
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->Contains(object));
- ASSERT(object->map()->IsMap());
+ CHECK(HEAP->Contains(object));
+ CHECK(object->map()->IsMap());
}
}
}
-#endif
double GCTracer::SizeOfHeapObjects() {
diff --git a/src/heap.cc b/src/heap.cc
index d5d1128..5de06aa 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -98,6 +98,7 @@
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
+ flush_monomorphic_ics_(false),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -404,18 +405,19 @@
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ Verify();
+ }
+#endif
+
#ifdef DEBUG
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
allow_allocation(false);
- if (FLAG_verify_heap) {
- Verify();
- }
-
if (FLAG_gc_verbose) Print();
-#endif // DEBUG
-#if defined(DEBUG)
ReportStatisticsBeforeGC();
#endif // DEBUG
@@ -447,14 +449,20 @@
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
-#ifdef DEBUG
- allow_allocation(true);
- ZapFromSpace();
+ // In release mode, we only zap the from space under heap verification.
+ if (Heap::ShouldZapGarbage()) {
+ ZapFromSpace();
+ }
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
+#ifdef DEBUG
+ allow_allocation(true);
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
@@ -651,7 +659,7 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
public:
@@ -660,20 +668,18 @@
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
// Check that the symbol is actually a symbol.
- ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
+ CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
}
}
}
};
-#endif // DEBUG
static void VerifySymbolTable() {
-#ifdef DEBUG
SymbolTableVerifier verifier;
HEAP->symbol_table()->IterateElements(&verifier);
-#endif // DEBUG
}
+#endif // VERIFY_HEAP
static bool AbortIncrementalMarkingAndCollectGarbage(
@@ -830,9 +836,12 @@
PROFILE(isolate_, CodeMovingGCEvent());
}
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifySymbolTable();
}
+#endif
+
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -959,9 +968,12 @@
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_epilogue_callback_();
}
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifySymbolTable();
}
+#endif
return next_gc_likely_to_collect_more;
}
@@ -988,7 +1000,7 @@
contexts_disposed_ = 0;
- isolate_->set_context_exit_happened(false);
+ flush_monomorphic_ics_ = false;
}
@@ -1044,7 +1056,7 @@
};
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
@@ -1052,7 +1064,7 @@
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
+ CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
}
}
}
@@ -1077,7 +1089,7 @@
object->Iterate(&v);
}
}
-#endif
+#endif // VERIFY_HEAP
void Heap::CheckNewSpaceExpansionCriteria() {
@@ -1216,7 +1228,8 @@
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
-#ifdef DEBUG
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -1353,9 +1366,11 @@
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
external_string_table_.Verify();
}
+#endif
if (external_string_table_.new_space_strings_.is_empty()) return;
@@ -3378,7 +3393,7 @@
}
ASSERT(buffer->IsFlat());
-#if DEBUG
+#if VERIFY_HEAP
if (FLAG_verify_heap) {
buffer->StringVerify();
}
@@ -3642,7 +3657,7 @@
// through the self_reference parameter.
code->CopyFrom(desc);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
code->Verify();
}
@@ -3724,7 +3739,7 @@
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
code->Verify();
}
@@ -4597,14 +4612,14 @@
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
char* dest = SeqAsciiString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
-#endif // DEBUG
+#endif
return result;
}
@@ -5394,9 +5409,9 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void Heap::Verify() {
- ASSERT(HasBeenSetUp());
+ CHECK(HasBeenSetUp());
store_buffer()->Verify();
@@ -5415,9 +5430,7 @@
lo_space_->Verify();
}
-
-
-#endif // DEBUG
+#endif
MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
@@ -5509,8 +5522,6 @@
return symbol_table()->LookupSymbolIfExists(string, symbol);
}
-
-#ifdef DEBUG
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@@ -5523,7 +5534,6 @@
}
}
}
-#endif // DEBUG
void Heap::IterateAndMarkPointersToFromSpace(Address start,
@@ -6260,11 +6270,12 @@
void Heap::TearDown() {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
+
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);
@@ -7194,9 +7205,11 @@
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
}
diff --git a/src/heap.h b/src/heap.h
index d6a3d29..04bffdd 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1103,7 +1103,10 @@
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed() { return ++contexts_disposed_; }
+ int NotifyContextDisposed() {
+ flush_monomorphic_ics_ = true;
+ return ++contexts_disposed_;
+ }
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
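
The disposal path above replaces the isolate-level context_exit_happened signal with a heap-owned flag; a condensed sketch of its lifecycle as set up by this patch, cleared alongside the contexts-disposed counter as the heap.cc hunk above shows:

    int NotifyContextDisposed() {
      flush_monomorphic_ics_ = true;   // ask the next GC to flush monomorphic ICs
      return ++contexts_disposed_;
    }
    // ...later, together with the disposal bookkeeping:
    contexts_disposed_ = 0;
    flush_monomorphic_ics_ = false;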
@@ -1261,13 +1264,15 @@
return &native_contexts_list_;
}
+#ifdef VERIFY_HEAP
+ // Verify the heap is in its normal state before or after a GC.
+ void Verify();
+#endif
+
#ifdef DEBUG
void Print();
void PrintHandles();
- // Verify the heap is in its normal state before or after a GC.
- void Verify();
-
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
void LargeObjectSpaceCheckStoreBuffer();
@@ -1275,10 +1280,23 @@
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
+#endif
+
+ // Zapping is needed for heap verification and is always done in debug builds.
+ static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+ return true;
+#else
+#ifdef VERIFY_HEAP
+ return FLAG_verify_heap;
+#else
+ return false;
+#endif
+#endif
+ }
// Fill in bogus values in from space
void ZapFromSpace();
-#endif
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -1620,6 +1638,8 @@
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
+ bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
+
intptr_t amount_of_external_allocated_memory() {
return amount_of_external_allocated_memory_;
}
@@ -1705,6 +1725,8 @@
int global_ic_age_;
+ bool flush_monomorphic_ics_;
+
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
@@ -2201,7 +2223,6 @@
};
-#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2211,7 +2232,6 @@
public:
inline void VisitPointers(Object** start, Object** end);
};
-#endif
// Space iterator for iterating over all spaces of the heap.
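
The nested #ifdefs in ShouldZapGarbage above reduce to a three-way choice; an equivalent flattened sketch:

    static inline bool ShouldZapGarbage() {
    #if defined(DEBUG)
      return true;                 // debug builds always zap from-space
    #elif defined(VERIFY_HEAP)
      return FLAG_verify_heap;     // release builds zap only under --verify-heap
    #else
      return false;
    #endif
    }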
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 628d987..374e54c 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -9016,8 +9016,10 @@
HInstruction* elements = AddInstruction(
new(zone()) HArgumentsElements(false));
HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
+ HInstruction* checked_index =
+ AddInstruction(new(zone()) HBoundsCheck(index, length));
HAccessArgumentsAt* result =
- new(zone()) HAccessArgumentsAt(elements, length, index);
+ new(zone()) HAccessArgumentsAt(elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
}
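
This hunk is the counterpart of the lithium-arm.cc and lithium-ia32.cc changes earlier in the patch: the arguments bounds check now lives in the Hydrogen graph as an explicit HBoundsCheck, so LAccessArgumentsAt no longer performs its own compare-and-deoptimize and needs no environment. Sketched end to end (ARM flavor):

    // Hydrogen: the index is checked once, up front.
    HInstruction* checked_index =
        AddInstruction(new(zone()) HBoundsCheck(index, length));
    // Lithium codegen: can now assume a valid index; no DeoptimizeIf required.
    __ sub(length, length, index);
    __ add(length, length, Operand(1));
    __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));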
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 142abb5..3b6987e 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -5732,7 +5732,7 @@
__ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
- if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
+ __ AssertSmi(ebx);
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
@@ -6981,8 +6981,7 @@
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
__ mov(r1, FieldOperand(elements, kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 72021b4..406537d 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2607,7 +2607,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (generate_debug_code_) __ AbortIfSmi(eax);
+ __ AssertNotSmi(eax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2838,7 +2838,7 @@
__ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (generate_debug_code_) __ AbortIfNotSmi(eax);
+ __ AssertSmi(eax);
context()->Plug(eax);
}
@@ -3462,9 +3462,7 @@
VisitForAccumulatorValue(args->at(0));
- if (generate_debug_code_) {
- __ AbortIfNotString(eax);
- }
+ __ AssertString(eax);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -3487,7 +3485,7 @@
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AbortIfNotString(eax);
+ __ AssertString(eax);
__ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
__ IndexFromHash(eax, eax);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index da17e29..32c66a0 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2109,7 +2109,7 @@
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ AbortIfNotString(input);
+ __ AssertString(input);
__ mov(result, FieldOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2742,12 +2742,9 @@
Register length = ToRegister(instr->length());
Operand index = ToOperand(instr->index());
Register result = ToRegister(instr->result());
-
- __ sub(length, index);
- DeoptimizeIf(below_equal, instr->environment());
-
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them; add one more.
+ __ sub(length, index);
__ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
@@ -4045,9 +4042,7 @@
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
instr, instr->context());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(eax);
- }
+ __ AssertSmi(eax);
__ SmiUntag(eax);
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -4301,9 +4296,7 @@
__ test(ToRegister(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
} else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(ToRegister(input));
- }
+ __ AssertSmi(ToRegister(input));
}
__ SmiUntag(ToRegister(input));
}
@@ -4836,7 +4829,7 @@
__ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
- __ AbortIfSmi(map);
+ __ AssertNotSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
instance_size >> kPointerSizeLog2);
__ Assert(equal, "Unexpected instance size");
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 38e7480..4b08a6d 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2279,12 +2279,10 @@
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 6fbe5b8..1a64c45 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -274,9 +274,7 @@
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ AssertNotSmi(object);
if (!FLAG_incremental_marking) {
return;
@@ -323,9 +321,7 @@
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
@@ -668,36 +664,44 @@
}
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- JumpIfSmi(object, &ok);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfSmi(object, &ok);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, "Operand not a number");
+ bind(&ok);
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(equal, "Operand is not a smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(equal, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- mov(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi and not a string");
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfSmi(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi");
+ }
}
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 909000e..e48d0e7 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -504,17 +504,17 @@
}
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 32a084d..e51d6c1 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -181,10 +181,6 @@
static void Initialize() {
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
- table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
- table_.Register(kVisitJSFunction, &VisitJSFunction);
-
table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
@@ -195,31 +191,7 @@
HeapObject::RawField(object, JSWeakMap::kSize));
}
- static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
- FixedBodyVisitor<IncrementalMarkingMarkingVisitor,
- SharedFunctionInfo::BodyDescriptor,
- void>::Visit(map, object);
- }
-
- static inline void VisitJSFunction(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- // Iterate over all fields in the body but take care in dealing with
- // the code entry and skip weak fields.
- VisitPointers(heap,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
- VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
- VisitPointers(heap,
- HeapObject::RawField(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object,
- JSFunction::kNonWeakFieldsEndOffset));
- }
+ static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
INLINE(static void VisitPointer(Heap* heap, Object** p)) {
Object* obj = *p;
@@ -561,8 +533,8 @@
ActivateIncrementalWriteBarrier();
-#ifdef DEBUG
// Marking bits are cleared by the sweeper.
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
}
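
The empty hook added above pairs with the mark-compact change below: both marking visitors now supply BeforeVisitingSharedFunctionInfo, apparently invoked from the shared StaticMarkingVisitor, and only the mark-compact variant does real work; a side-by-side sketch:

    // IncrementalMarkingMarkingVisitor: no-op, since the incremental marker
    // does not support code flushing yet (see the TODO(1609) below).
    static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}

    // MarkCompactMarkingVisitor: prepare the SharedFunctionInfo for visiting.
    INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
      SharedFunctionInfo::cast(object)->BeforeVisitingPointers();
    }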
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index be70dc3..37fec4e 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -71,14 +71,14 @@
encountered_weak_maps_(NULL) { }
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+ CHECK(HEAP->mark_compact_collector()->IsMarked(object));
}
}
}
@@ -95,7 +95,7 @@
current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
- ASSERT(current >= next_object_must_be_here_or_later);
+ CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
}
@@ -108,12 +108,12 @@
NewSpacePageIterator it(space->bottom(), end);
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
- ASSERT_EQ(space->bottom(),
+ CHECK_EQ(space->bottom(),
NewSpacePage::FromAddress(space->bottom())->area_start());
while (it.has_next()) {
NewSpacePage* page = it.next();
Address limit = it.has_next() ? page->area_end() : end;
- ASSERT(limit == end || !page->Contains(end));
+ CHECK(limit == end || !page->Contains(end));
VerifyMarking(page->area_start(), limit);
}
}
@@ -173,7 +173,7 @@
current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
- ASSERT(current >= next_object_must_be_here_or_later);
+ CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
}
@@ -189,7 +189,7 @@
NewSpacePage* page = it.next();
Address current = page->area_start();
Address limit = it.has_next() ? page->area_end() : space->top();
- ASSERT(limit == space->top() || !page->Contains(space->top()));
+ CHECK(limit == space->top() || !page->Contains(space->top()));
while (current < limit) {
HeapObject* object = HeapObject::FromAddress(current);
object->Iterate(&visitor);
@@ -221,8 +221,10 @@
VerifyEvacuationVisitor visitor;
heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
+#endif // VERIFY_HEAP
+#ifdef DEBUG
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
public:
VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
@@ -384,7 +386,7 @@
ClearWeakMaps();
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarking(heap_);
}
@@ -406,7 +408,7 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
PageIterator it(space);
@@ -417,6 +419,7 @@
}
}
+
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
NewSpacePageIterator it(space->bottom(), space->top());
@@ -427,6 +430,7 @@
}
}
+
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_pointer_space());
VerifyMarkbitsAreClean(heap_->old_data_space());
@@ -438,11 +442,11 @@
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsWhite(mark_bit));
- ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
}
-#endif
+#endif // VERIFY_HEAP
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
@@ -804,7 +808,7 @@
space->PrepareForMarkCompact();
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (!was_marked_incrementally_ && FLAG_verify_heap) {
VerifyMarkbitsAreClean();
}
@@ -855,133 +859,69 @@
// and continue with marking. This process repeats until all reachable
// objects have been marked.
-class CodeFlusher {
- public:
- explicit CodeFlusher(Isolate* isolate)
- : isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL) {}
+void CodeFlusher::ProcessJSFunctionCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
- void AddCandidate(SharedFunctionInfo* shared_info) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
+ JSFunction* candidate = jsfunction_candidates_head_;
+ JSFunction* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
- void AddCandidate(JSFunction* function) {
- ASSERT(function->code() == function->shared()->code());
+ SharedFunctionInfo* shared = candidate->shared();
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
-
- void ProcessCandidates() {
- ProcessSharedFunctionInfoCandidates();
- ProcessJSFunctionCandidates();
- }
-
- private:
- void ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- SharedFunctionInfo* shared = candidate->shared();
-
- Code* code = shared->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
- } else {
- candidate->set_code(shared->code());
- }
-
- // We are in the middle of a GC cycle so the write barrier in the code
- // setter did not record the slot update and we have to do that manually.
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->
- RecordCodeEntrySlot(slot, target);
-
- RecordSharedFunctionInfoCodeSlot(shared);
-
- candidate = next_candidate;
+ Code* code = shared->code();
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
+ shared->set_code(lazy_compile);
+ candidate->set_code(lazy_compile);
+ } else {
+ candidate->set_code(shared->code());
}
- jsfunction_candidates_head_ = NULL;
- }
-
-
- void ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- SetNextCandidate(candidate, NULL);
-
- Code* code = candidate->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- candidate->set_code(lazy_compile);
- }
-
- RecordSharedFunctionInfoCodeSlot(candidate);
-
- candidate = next_candidate;
- }
-
- shared_function_info_candidates_head_ = NULL;
- }
-
- void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
- Object** slot = HeapObject::RawField(shared,
- SharedFunctionInfo::kCodeOffset);
+ // We are in the middle of a GC cycle so the write barrier in the code
+ // setter did not record the slot update and we have to do that manually.
+ Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+ Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
isolate_->heap()->mark_compact_collector()->
- RecordSlot(slot, slot, HeapObject::cast(*slot));
+ RecordCodeEntrySlot(slot, target);
+
+ Object** shared_code_slot =
+ HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
+
+ candidate = next_candidate;
}
- static JSFunction** GetNextCandidateField(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- candidate->address() + JSFunction::kCodeEntryOffset);
- }
+ jsfunction_candidates_head_ = NULL;
+}
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- return *GetNextCandidateField(candidate);
- }
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- *GetNextCandidateField(candidate) = next_candidate;
- }
+void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
- static SharedFunctionInfo** GetNextCandidateField(
- SharedFunctionInfo* candidate) {
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ SharedFunctionInfo* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ SetNextCandidate(candidate, NULL);
+
Code* code = candidate->code();
- return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kGCMetadataOffset);
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
+ candidate->set_code(lazy_compile);
+ }
+
+ Object** code_slot =
+ HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(code_slot, code_slot, *code_slot);
+
+ candidate = next_candidate;
}
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- return reinterpret_cast<SharedFunctionInfo*>(
- candidate->code()->gc_metadata());
- }
-
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
- }
-
- Isolate* isolate_;
- JSFunction* jsfunction_candidates_head_;
- SharedFunctionInfo* shared_function_info_candidates_head_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
+ shared_function_info_candidates_head_ = NULL;
+}
MarkCompactCollector::~MarkCompactCollector() {
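
The relocated CodeFlusher keeps its candidate lists intrusive, as in the removed in-class definitions: a JSFunction's next-candidate pointer is stored in its code-entry slot, and a SharedFunctionInfo's in code()->gc_metadata(); a sketch of the accessors taken from the code being moved:

    static JSFunction* GetNextCandidate(JSFunction* candidate) {
      return *reinterpret_cast<JSFunction**>(
          candidate->address() + JSFunction::kCodeEntryOffset);
    }
    static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
      return reinterpret_cast<SharedFunctionInfo*>(
          candidate->code()->gc_metadata());
    }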
@@ -1133,6 +1073,11 @@
return true;
}
+ INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+ shared->BeforeVisitingPointers();
+ }
+
static void VisitJSWeakMap(Map* map, HeapObject* object) {
MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
@@ -1176,123 +1121,8 @@
// Code flushing support.
- // How many collections newly compiled code object will survive before being
- // flushed.
- static const int kCodeAgeThreshold = 5;
-
static const int kRegExpCodeThreshold = 5;
- inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->undefined_value();
- return (info->script() != undefined) &&
- (reinterpret_cast<Script*>(info->script())->source() != undefined);
- }
-
-
- inline static bool IsCompiled(JSFunction* function) {
- return function->code() !=
- function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
- }
-
- inline static bool IsCompiled(SharedFunctionInfo* function) {
- return function->code() !=
- function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
- }
-
- inline static bool IsFlushable(Heap* heap, JSFunction* function) {
- SharedFunctionInfo* shared_info = function->unchecked_shared();
-
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- MarkBit code_mark = Marking::MarkBitFrom(function->code());
- if (code_mark.Get()) {
- if (!Marking::MarkBitFrom(shared_info).Get()) {
- shared_info->set_code_age(0);
- }
- return false;
- }
-
- // We do not flush code for optimized functions.
- if (function->code() != shared_info->code()) {
- return false;
- }
-
- return IsFlushable(heap, shared_info);
- }
-
- inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- MarkBit code_mark =
- Marking::MarkBitFrom(shared_info->code());
- if (code_mark.Get()) {
- return false;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
- return false;
- }
-
- // We never flush code for Api functions.
- Object* function_data = shared_info->function_data();
- if (function_data->IsFunctionTemplateInfo()) {
- return false;
- }
-
- // Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) {
- return false;
- }
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) {
- return false;
- }
-
- // If this is a full script wrapped in a function we do no flush the code.
- if (shared_info->is_toplevel()) {
- return false;
- }
-
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
- return false;
- }
-
- return true;
- }
-
-
- static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
- if (!IsFlushable(heap, function)) return false;
-
- // This function's code looks flushable. But we have to postpone the
- // decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would make the nonoptimized version of the code nonflushable,
- // because it is required for bailing out from optimized code.
- heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
- return true;
- }
-
- static inline bool IsValidNotBuiltinContext(Object* ctx) {
- return ctx->IsContext() &&
- !Context::cast(ctx)->global_object()->IsJSBuiltinsObject();
- }
-
-
- static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
- SharedFunctionInfo::cast(object)->BeforeVisitingPointers();
-
- FixedBodyVisitor<MarkCompactMarkingVisitor,
- SharedFunctionInfo::BodyDescriptor,
- void>::Visit(map, object);
- }
-
-
static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
JSRegExp* re,
bool is_ascii) {
@@ -1366,138 +1196,6 @@
VisitJSRegExp(map, object);
}
-
- static void VisitSharedFunctionInfoAndFlushCode(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
-
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitSharedFunctionInfoGeneric(map, object);
- return;
- }
- VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
- }
-
-
- static void VisitSharedFunctionInfoAndFlushCodeGeneric(
- Map* map, HeapObject* object, bool known_flush_code_candidate) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
- shared->BeforeVisitingPointers();
-
- if (!known_flush_code_candidate) {
- known_flush_code_candidate = IsFlushable(heap, shared);
- if (known_flush_code_candidate) {
- heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
- }
- }
-
- VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
- }
-
-
- static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitJSFunction(map, object);
- return;
- }
-
- JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
- // The function must have a valid context and not be a builtin.
- bool flush_code_candidate = false;
- if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
- flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
- }
-
- if (!flush_code_candidate) {
- Code* code = jsfunction->shared()->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- collector->MarkObject(code, code_mark);
-
- if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- collector->MarkInlinedFunctionsCode(jsfunction->code());
- }
- }
-
- VisitJSFunctionFields(map,
- reinterpret_cast<JSFunction*>(object),
- flush_code_candidate);
- }
-
-
- static void VisitJSFunction(Map* map, HeapObject* object) {
- VisitJSFunctionFields(map,
- reinterpret_cast<JSFunction*>(object),
- false);
- }
-
-
- static inline void VisitJSFunctionFields(Map* map,
- JSFunction* object,
- bool flush_code_candidate) {
- Heap* heap = map->GetHeap();
-
- VisitPointers(heap,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
- if (!flush_code_candidate) {
- VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
- } else {
- // Don't visit code object.
-
- // Visit shared function info to avoid double checking of its
- // flushability.
- SharedFunctionInfo* shared_info = object->unchecked_shared();
- MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
- if (!shared_info_mark.Get()) {
- Map* shared_info_map = shared_info->map();
- MarkBit shared_info_map_mark =
- Marking::MarkBitFrom(shared_info_map);
- heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
- heap->mark_compact_collector()->MarkObject(shared_info_map,
- shared_info_map_mark);
- VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
- shared_info,
- true);
- }
- }
-
- VisitPointers(
- heap,
- HeapObject::RawField(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
- }
-
-
- static void VisitSharedFunctionInfoFields(Heap* heap,
- HeapObject* object,
- bool flush_code_candidate) {
- VisitPointer(heap,
- HeapObject::RawField(object, SharedFunctionInfo::kNameOffset));
-
- if (!flush_code_candidate) {
- VisitPointer(heap,
- HeapObject::RawField(object,
- SharedFunctionInfo::kCodeOffset));
- }
-
- VisitPointers(
- heap,
- HeapObject::RawField(object,
- SharedFunctionInfo::kOptimizedCodeMapOffset),
- HeapObject::RawField(object, SharedFunctionInfo::kSize));
- }
-
static VisitorDispatchTable<Callback> non_count_table_;
};
@@ -1634,12 +1332,6 @@
void MarkCompactMarkingVisitor::Initialize() {
StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
-
- table_.Register(kVisitJSFunction,
- &VisitJSFunctionAndFlushCode);
-
table_.Register(kVisitJSRegExp,
&VisitRegExpAndFlushCode);
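The flushing-specific visitors deleted above are not dropped from the system: Initialize() now defers to StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize(), so the kVisitSharedFunctionInfo and kVisitJSFunction registrations have presumably moved into that shared base class (objects-visiting-inl.h, which this diff does not show). A minimal sketch of the presumed base-class registration:

// Sketch only -- the real registration lives in objects-visiting-inl.h
// and may differ in detail.
template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
  table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
  table_.Register(kVisitJSFunction, &VisitJSFunction);
  // ... remaining visitor registrations ...
}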
@@ -1714,26 +1406,6 @@
};
-void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
- // For optimized functions we should retain both the non-optimized version
- // of their code and the non-optimized versions of all inlined functions.
- // This is required to support bailing out from inlined code.
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-
- FixedArray* literals = data->LiteralArray();
-
- for (int i = 0, count = data->InlinedFunctionCount()->value();
- i < count;
- i++) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- Code* inlined_code = inlined->shared()->code();
- MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
- MarkObject(inlined_code, inlined_code_mark);
- }
-}
-
-
void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
@@ -1746,7 +1418,8 @@
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
- MarkInlinedFunctionsCode(frame->LookupCode());
+ MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
+ frame->LookupCode());
}
}
}
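MarkInlinedFunctionsCode is relocated rather than deleted: the removal above pairs with the new call site MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(), frame->LookupCode()). Below is a sketch of the moved helper, reconstructed from the removed body and the new (Heap*, Code*) signature; the actual definition is in a file this diff does not touch:

// Reconstruction, not verbatim source: keeps the unoptimized code of every
// function inlined into an optimized code object alive so that
// deoptimization can still bail out into it.
static void MarkInlinedFunctionsCode(Heap* heap, Code* code) {
  DeoptimizationInputData* data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  FixedArray* literals = data->LiteralArray();
  for (int i = 0, count = data->InlinedFunctionCount()->value();
       i < count;
       i++) {
    JSFunction* inlined = JSFunction::cast(literals->get(i));
    Code* inlined_code = inlined->shared()->code();
    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
    heap->mark_compact_collector()->MarkObject(inlined_code,
                                               inlined_code_mark);
  }
}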
@@ -2330,6 +2003,9 @@
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
code_flusher_->ProcessCandidates();
+ // TODO(1609): The incremental marker does not support code flushing yet,
+ // so we need to disable it before the incremental marking steps of the
+ // next cycle.
+ EnableCodeFlushing(false);
}
if (!FLAG_watch_ic_patching) {
@@ -3349,7 +3025,7 @@
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyEvacuation(heap_);
}
diff --git a/src/mark-compact.h b/src/mark-compact.h
index deade29..1d17582 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -403,6 +403,81 @@
};
+// CodeFlusher collects candidates for code flushing during marking and
+// processes those candidates after marking has completed in order to
+// reset those functions referencing code objects that would otherwise
+// be unreachable. Code objects can be referenced in two ways:
+// - SharedFunctionInfo references unoptimized code.
+// - JSFunction references either unoptimized or optimized code.
+// We are not allowed to flush unoptimized code for functions that got
+// optimized or inlined into optimized code, because we might bailout
+// into the unoptimized code again during deoptimization.
+class CodeFlusher {
+ public:
+ explicit CodeFlusher(Isolate* isolate)
+ : isolate_(isolate),
+ jsfunction_candidates_head_(NULL),
+ shared_function_info_candidates_head_(NULL) {}
+
+ void AddCandidate(SharedFunctionInfo* shared_info) {
+ SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+ shared_function_info_candidates_head_ = shared_info;
+ }
+
+ void AddCandidate(JSFunction* function) {
+ ASSERT(function->code() == function->shared()->code());
+ SetNextCandidate(function, jsfunction_candidates_head_);
+ jsfunction_candidates_head_ = function;
+ }
+
+ void ProcessCandidates() {
+ ProcessSharedFunctionInfoCandidates();
+ ProcessJSFunctionCandidates();
+ }
+
+ private:
+ void ProcessJSFunctionCandidates();
+ void ProcessSharedFunctionInfoCandidates();
+
+ static JSFunction** GetNextCandidateField(JSFunction* candidate) {
+ return reinterpret_cast<JSFunction**>(
+ candidate->address() + JSFunction::kCodeEntryOffset);
+ }
+
+ static JSFunction* GetNextCandidate(JSFunction* candidate) {
+ return *GetNextCandidateField(candidate);
+ }
+
+ static void SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate) {
+ *GetNextCandidateField(candidate) = next_candidate;
+ }
+
+ static SharedFunctionInfo** GetNextCandidateField(
+ SharedFunctionInfo* candidate) {
+ Code* code = candidate->code();
+ return reinterpret_cast<SharedFunctionInfo**>(
+ code->address() + Code::kGCMetadataOffset);
+ }
+
+ static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
+ return reinterpret_cast<SharedFunctionInfo*>(
+ candidate->code()->gc_metadata());
+ }
+
+ static void SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate) {
+ candidate->code()->set_gc_metadata(next_candidate);
+ }
+
+ Isolate* isolate_;
+ JSFunction* jsfunction_candidates_head_;
+ SharedFunctionInfo* shared_function_info_candidates_head_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
+};
+
+
// Defined in isolate.h.
class ThreadLocalTop;
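Note how the two candidate lists above need no side allocation: the next pointer is threaded through a field of the candidate itself that is dead while marking is in progress, namely the code-entry slot of a JSFunction and the gc_metadata slot of a SharedFunctionInfo's code object. The same intrusive-list idea in isolation (an illustrative sketch, not V8 code):

// Standalone illustration of the CodeFlusher linked-list trick.
#include <cstddef>

struct Candidate {
  Candidate* next_slot;  // Stands in for the borrowed field (code entry
                         // or Code::gc_metadata); unused during marking.
};

class FlusherSketch {
 public:
  FlusherSketch() : head_(NULL) {}
  void AddCandidate(Candidate* c) {   // O(1) push, no allocation.
    c->next_slot = head_;
    head_ = c;
  }
  void ProcessCandidates() {          // Walk, restore fields, flush.
    Candidate* c = head_;
    while (c != NULL) {
      Candidate* next = c->next_slot;
      c->next_slot = NULL;            // Give the borrowed field back.
      // ... reset/flush code for this candidate here ...
      c = next;
    }
    head_ = NULL;
  }
 private:
  Candidate* head_;
};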
@@ -497,7 +572,7 @@
PRECISE
};
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
@@ -631,10 +706,6 @@
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- // Mark non-optimized code for functions inlined into the given optimized
- // code. This will prevent it from being flushed.
- void MarkInlinedFunctionsCode(Code* code);
-
// Mark code objects that are active on the stack to prevent them
// from being flushed.
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
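The two #ifdef DEBUG -> #ifdef VERIFY_HEAP switches in this patch are what allow heap verification in release builds: the verification code now compiles whenever VERIFY_HEAP is defined instead of only in debug builds, while remaining a runtime opt-in. Sketched wiring below; the macro V8_ENABLE_VERIFY_HEAP is hypothetical, and the real define comes from globals.h and the build configuration:

// Hypothetical wiring, for illustration only.
#if defined(DEBUG) || defined(V8_ENABLE_VERIFY_HEAP)
#define VERIFY_HEAP
#endif

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {   // Still gated at runtime by --verify-heap.
    VerifyMarkbitsAreClean();
  }
#endif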
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index ee3127a..0b551fb 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -655,11 +655,9 @@
Register scratch1,
Register scratch2,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label is_smi, done;
@@ -721,11 +719,9 @@
Register scratch3,
FPURegister double_scratch,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label done;
Label not_in_int32_range;
@@ -861,11 +857,9 @@
__ Branch(&done);
__ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
@@ -932,11 +926,9 @@
__ UntagAndJumpIfSmi(dst, object, &done);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
@@ -2619,9 +2611,9 @@
Register scratch3 = t0;
ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
}
Register heap_number_map = t2;
@@ -7265,8 +7257,7 @@
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
// Compute the capacity mask.
__ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
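Every hunk in this file (and in its ARM twin) is the same mechanical refactoring: the FLAG_debug_code test moves out of the call sites and into new MacroAssembler::Assert* helpers (AssertSmi, AssertNotSmi, AssertString, AssertRootValue), leaving an unconditional one-liner in each stub. A sketch of the shape such a helper plausibly takes on MIPS; the real bodies live in macro-assembler-mips.cc and may differ in message text and instruction choice:

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {   // The check every call site used to spell out.
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(eq, "Operand is not a smi", at, Operand(zero_reg));
  }
}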
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index ed8a2e2..4f09747 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -2690,7 +2690,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (generate_debug_code_) __ AbortIfSmi(v0);
+ __ AssertNotSmi(v0);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
@@ -3571,7 +3571,7 @@
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AbortIfNotString(v0);
+ __ AssertString(v0);
__ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
__ IndexFromHash(v0, v0);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 73b42fd..77d4b9b 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -911,7 +911,7 @@
void LCodeGen::DoModI(LModI* instr) {
Register scratch = scratch0();
- const Register left = ToRegister(instr->InputAt(0));
+ const Register left = ToRegister(instr->left());
const Register result = ToRegister(instr->result());
Label done;
@@ -939,7 +939,7 @@
__ And(result, scratch, p2constant - 1);
} else {
// div runs in the background while we check for special cases.
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ Register right = EmitLoadRegister(instr->right(), scratch);
__ div(left, right);
// Check for x % 0.
@@ -959,8 +959,8 @@
void LCodeGen::DoDivI(LDivI* instr) {
- const Register left = ToRegister(instr->InputAt(0));
- const Register right = ToRegister(instr->InputAt(1));
+ const Register left = ToRegister(instr->left());
+ const Register right = ToRegister(instr->right());
const Register result = ToRegister(instr->result());
// On MIPS div is asynchronous - it will run in the background while we
@@ -998,8 +998,8 @@
Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
@@ -1069,7 +1069,7 @@
} else {
Register right = EmitLoadRegister(right_op, scratch);
if (bailout_on_minus_zero) {
- __ Or(ToRegister(instr->TempAt(0)), left, right);
+ __ Or(ToRegister(instr->temp()), left, right);
}
if (can_overflow) {
@@ -1089,7 +1089,7 @@
__ Branch(&done, ne, result, Operand(zero_reg));
DeoptimizeIf(lt,
instr->environment(),
- ToRegister(instr->TempAt(0)),
+ ToRegister(instr->temp()),
Operand(zero_reg));
__ bind(&done);
}
@@ -1098,8 +1098,8 @@
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->InputAt(0);
- LOperand* right_op = instr->InputAt(1);
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
ASSERT(left_op->IsRegister());
Register left = ToRegister(left_op);
Register result = ToRegister(instr->result());
@@ -1132,8 +1132,8 @@
void LCodeGen::DoShiftI(LShiftI* instr) {
// Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
// result may alias either of them.
- LOperand* right_op = instr->InputAt(1);
- Register left = ToRegister(instr->InputAt(0));
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
if (right_op->IsRegister()) {
@@ -1195,8 +1195,8 @@
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
@@ -1260,28 +1260,28 @@
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->InputAt(0));
+ Register map = ToRegister(instr->value());
__ EnumLength(result, map);
}
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Load map into |result|.
__ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -1294,9 +1294,9 @@
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
Label done;
// If the object is a smi return the object.
@@ -1313,9 +1313,9 @@
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(a0));
@@ -1351,14 +1351,14 @@
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ Nor(result, zero_reg, Operand(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ Register input_reg = EmitLoadRegister(instr->value(), at);
__ push(input_reg);
CallRuntime(Runtime::kThrow, 1, instr);
@@ -1369,8 +1369,8 @@
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
@@ -1408,8 +1408,8 @@
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
if (instr->hydrogen()->representation().IsInteger32()) {
@@ -1470,8 +1470,8 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
@@ -1511,8 +1511,8 @@
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(a1));
- ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1576,15 +1576,15 @@
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -1718,8 +1718,8 @@
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1770,8 +1770,8 @@
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1780,7 +1780,7 @@
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
+ Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1792,7 +1792,7 @@
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
@@ -1858,8 +1858,8 @@
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
Register temp2 = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1886,8 +1886,8 @@
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1905,15 +1905,15 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ Register input_reg = EmitLoadRegister(instr->value(), at);
__ And(at, input_reg, kSmiTagMask);
EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1982,7 +1982,7 @@
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2001,10 +2001,10 @@
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ AbortIfNotString(input);
+ __ AssertString(input);
__ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2013,7 +2013,7 @@
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2093,9 +2093,9 @@
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register temp = scratch0();
- Register temp2 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2111,8 +2111,8 @@
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -2123,8 +2123,8 @@
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
Label true_label, done;
- ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
- ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
+ ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
+ ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
Register result = ToRegister(instr->result());
ASSERT(result.is(v0));
@@ -2161,8 +2161,8 @@
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
ASSERT(object.is(a0));
@@ -2237,7 +2237,7 @@
// Get the temp register reserved by the instruction. This needs to be t0
// because its slot in the pushed safepoint registers is used to communicate
// the offset to the location of the map check.
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
ASSERT(temp.is(t0));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
@@ -2332,7 +2332,7 @@
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload.
- Register payload = ToRegister(instr->TempAt(0));
+ Register payload = ToRegister(instr->temp());
__ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
@@ -2415,7 +2415,7 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
@@ -2568,7 +2568,7 @@
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
Register scratch = scratch0();
__ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
@@ -2601,7 +2601,7 @@
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->InputAt(0));
+ Register from_reg = ToRegister(instr->object());
__ lw(to_reg, FieldMemOperand(from_reg,
ExternalArray::kExternalPointerOffset));
}
@@ -2612,14 +2612,6 @@
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
-
- // Bail out if index is not a valid argument index. The unsigned check
- // also catches negative indices for free.
-
- // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
- // as they do on ARM. It will save us an instruction.
- DeoptimizeIf(ls, instr->environment(), length, Operand(index));
-
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them; add one more.
__ subu(length, length, index);
@@ -2874,7 +2866,7 @@
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->InputAt(0));
+ Register elem = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Label done;
@@ -2992,7 +2984,7 @@
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
Abort("DoPushArgument not implemented for double type.");
} else {
@@ -3043,7 +3035,7 @@
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
+ Register global = ToRegister(instr->global_object());
Register result = ToRegister(instr->result());
__ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -3104,7 +3096,7 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3169,7 +3161,7 @@
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label done;
@@ -3200,7 +3192,7 @@
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- FPURegister input = ToDoubleRegister(instr->InputAt(0));
+ FPURegister input = ToDoubleRegister(instr->value());
FPURegister result = ToDoubleRegister(instr->result());
__ abs_d(result, input);
} else if (r.IsInteger32()) {
@@ -3209,7 +3201,7 @@
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
@@ -3220,11 +3212,11 @@
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
- Register except_flag = ToRegister(instr->TempAt(0));
+ Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
single_scratch,
@@ -3251,7 +3243,7 @@
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3328,16 +3320,16 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp = ToDoubleRegister(instr->temp());
ASSERT(!input.is(result));
@@ -3362,11 +3354,11 @@
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(f4));
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(a2));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(f4));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(a2));
+ ASSERT(ToDoubleRegister(instr->left()).is(f2));
ASSERT(ToDoubleRegister(instr->result()).is(f0));
if (exponent_type.IsTagged()) {
@@ -3403,7 +3395,7 @@
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(f0));
- ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+ ASSERT(ToRegister(instr->global_object()).is(a0));
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
@@ -3613,7 +3605,7 @@
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+ ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3639,7 +3631,7 @@
__ li(scratch, Operand(instr->transition()));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
@@ -3931,7 +3923,7 @@
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
@@ -3953,7 +3945,7 @@
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
@@ -3961,7 +3953,7 @@
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
@@ -4028,9 +4020,7 @@
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(v0);
- }
+ __ AssertSmi(v0);
__ SmiUntag(v0);
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -4086,14 +4076,14 @@
void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->InputAt(0));
+ Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
__ lw(result, FieldMemOperand(string, String::kLengthOffset));
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -4110,7 +4100,7 @@
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
LOperand* output = instr->result();
FPURegister dbl_scratch = double_scratch0();
@@ -4126,7 +4116,7 @@
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredNumberTagI(instr_,
- instr_->InputAt(0),
+ instr_->value(),
SIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
@@ -4134,7 +4124,7 @@
LNumberTagI* instr_;
};
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->value());
Register dst = ToRegister(instr->result());
Register overflow = scratch0();
@@ -4152,7 +4142,7 @@
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredNumberTagI(instr_,
- instr_->InputAt(0),
+ instr_->value(),
UNSIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
@@ -4160,7 +4150,7 @@
LNumberTagU* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@@ -4234,11 +4224,11 @@
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
@@ -4267,13 +4257,13 @@
void LCodeGen::DoSmiTag(LSmiTag* instr) {
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
@@ -4338,9 +4328,9 @@
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
FPURegister single_scratch = double_scratch.low();
@@ -4357,8 +4347,8 @@
// of the if.
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+ Register scratch3 = ToRegister(instr->temp2());
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4428,7 +4418,7 @@
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4446,7 +4436,7 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -4464,12 +4454,12 @@
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
+ Register scratch3 = ToRegister(instr->temp2());
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4496,21 +4486,21 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ And(at, ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ And(at, ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
__ GetObjectType(input, scratch, scratch);
@@ -4580,7 +4570,7 @@
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register scratch = scratch0();
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Label success;
@@ -4599,7 +4589,7 @@
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
@@ -4615,7 +4605,7 @@
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -4647,8 +4637,8 @@
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -4689,8 +4679,8 @@
new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
- Register scratch2 = ToRegister(instr->TempAt(1));
+ Register scratch = ToRegister(instr->temp());
+ Register scratch2 = ToRegister(instr->temp2());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
@@ -4986,7 +4976,7 @@
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+ ASSERT(ToRegister(instr->value()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
__ push(a0);
CallRuntime(Runtime::kToFastProperties, 1, instr);
@@ -5068,14 +5058,14 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -5202,7 +5192,7 @@
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
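Every change in this file is one pattern: positional operand access (InputAt(i), TempAt(i)) is replaced by named accessors declared on each instruction (see the lithium-mips.h hunks below). The apparent payoff is that an operand's role is spelled out at the use site, slots can carry domain names (date(), constructor(), global_object()), and calling an accessor an instruction does not define fails to compile instead of silently reading the wrong slot. The before/after shape:

// Before: indices carry no meaning and are easy to transpose.
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));

// After: each instruction names its slots once in lithium-mips.h and
// codegen reads them by role.
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());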
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index e9edd48..4015bff 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -194,22 +194,22 @@
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -218,57 +218,57 @@
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(0)->PrintTo(stream);
- InputAt(1)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -278,7 +278,7 @@
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@@ -292,26 +292,26 @@
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -340,17 +340,15 @@
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
-
stream->Add(" length ");
length()->PrintTo(stream);
-
stream->Add(" index ");
index()->PrintTo(stream);
}
@@ -2108,12 +2106,10 @@
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 367cf2a..939fc67 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -259,9 +259,6 @@
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -275,6 +272,10 @@
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -295,11 +296,6 @@
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
@@ -307,6 +303,10 @@
private:
virtual int InputCount() { return I; }
+ virtual LOperand* InputAt(int i) { return inputs_[i]; }
+
+ virtual int TempCount() { return T; }
+ virtual LOperand* TempAt(int i) { return temps_[i]; }
};
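With this hunk, InputAt/TempAt leave the public LInstruction interface entirely: they become virtual details of LTemplateInstruction, reachable from outside only through the iterator machinery (note the friend class TempIterator declaration above). A sketch of how such an iterator could use that friendship; the real TempIterator lives in lithium.h and is more elaborate:

// Sketch, assuming only the friendship shown above; details may differ.
class TempIterator BASE_EMBEDDED {
 public:
  explicit TempIterator(LInstruction* instr)
      : instr_(instr), limit_(instr->TempCount()), current_(0) {}
  bool Done() { return current_ >= limit_; }
  LOperand* Current() {
    ASSERT(!Done());
    return instr_->TempAt(current_);  // Allowed: TempIterator is a friend.
  }
  void Advance() { ++current_; }
 private:
  LInstruction* instr_;
  int limit_;
  int current_;
};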
@@ -524,6 +524,8 @@
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
@@ -550,16 +552,22 @@
// Used for the standard case.
LModI(LOperand* left,
LOperand* right,
- LOperand* temp1,
+ LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
@@ -572,6 +580,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -585,6 +596,10 @@
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
@@ -597,6 +612,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
@@ -616,6 +634,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -631,6 +652,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
@@ -643,6 +667,8 @@
inputs_[0] = left;
}
+ LOperand* left() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
@@ -655,6 +681,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
@@ -672,6 +700,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
@@ -686,6 +717,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
@@ -699,6 +733,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
@@ -713,6 +749,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
@@ -728,6 +767,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
@@ -744,6 +786,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
@@ -758,6 +802,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
@@ -769,6 +815,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
@@ -784,6 +832,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
@@ -799,6 +850,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -813,6 +867,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -824,6 +881,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
@@ -863,6 +923,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
@@ -879,7 +942,8 @@
}
Token::Value op() const { return op_; }
-
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
@@ -897,6 +961,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
@@ -935,6 +1002,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
@@ -949,6 +1018,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
@@ -970,6 +1042,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
@@ -981,6 +1055,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
@@ -993,6 +1069,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
};
@@ -1003,6 +1081,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
@@ -1015,6 +1095,9 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
@@ -1027,9 +1110,12 @@
temps_[0] = temp;
}
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Smi* index() const { return index_; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
- Smi* index() const { return index_; }
private:
Smi* index_;
@@ -1042,6 +1128,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1052,6 +1140,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
@@ -1063,6 +1153,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@@ -1075,6 +1168,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@@ -1087,6 +1183,9 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
@@ -1098,6 +1197,8 @@
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
@@ -1112,6 +1213,8 @@
}
Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
@@ -1130,12 +1233,14 @@
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ Token::Value op() const { return op_; }
+
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
- Token::Value op() const { return op_; }
-
private:
Token::Value op_;
};
@@ -1147,6 +1252,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1157,6 +1264,8 @@
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
@@ -1168,10 +1277,10 @@
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
};
@@ -1181,10 +1290,11 @@
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1195,10 +1305,10 @@
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
};
@@ -1208,6 +1318,8 @@
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
@@ -1218,6 +1330,8 @@
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
@@ -1230,11 +1344,12 @@
inputs_[1] = key;
}
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1246,12 +1361,13 @@
inputs_[1] = key;
}
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
"load-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1264,12 +1380,13 @@
inputs_[1] = key;
}
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
"load-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1279,15 +1396,15 @@
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LLoadKeyedGeneric(LOperand* object, LOperand* key) {
+ inputs_[0] = object;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1304,10 +1421,11 @@
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* global_object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1320,10 +1438,11 @@
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
@@ -1335,12 +1454,13 @@
inputs_[1] = value;
}
+ LOperand* global_object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
- LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1351,10 +1471,11 @@
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1368,11 +1489,12 @@
inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1385,6 +1507,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
@@ -1421,9 +1545,9 @@
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
@@ -1440,9 +1564,9 @@
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
@@ -1452,9 +1576,9 @@
inputs_[0] = global_object;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+ LOperand* global_object() { return inputs_[0]; }
- LOperand* global() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
@@ -1476,11 +1600,11 @@
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- LOperand* function() { return inputs_[0]; }
-
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1494,6 +1618,8 @@
inputs_[0] = key;
}
+ LOperand* key() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1522,10 +1648,11 @@
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- LOperand* function() { return inputs_[0]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1560,6 +1687,8 @@
inputs_[0] = constructor;
}
+ LOperand* constructor() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1585,6 +1714,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
@@ -1595,6 +1726,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
};
@@ -1605,6 +1738,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
@@ -1615,18 +1750,24 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
@@ -1634,12 +1775,16 @@
// Sometimes truncating conversion from a double to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
- LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1651,15 +1796,20 @@
class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
public:
LTaggedToI(LOperand* value,
- LOperand* temp1,
+ LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1673,6 +1823,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
@@ -1683,6 +1835,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1695,10 +1849,11 @@
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
+ LOperand* value() { return inputs_[0]; }
bool needs_check() const { return needs_check_; }
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
private:
bool needs_check_;
};
@@ -1706,20 +1861,21 @@
class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
@@ -1729,18 +1885,19 @@
class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedGeneric(LOperand* object, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1748,21 +1905,22 @@
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
+ LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1771,21 +1929,22 @@
public:
LStoreKeyedFastDoubleElement(LOperand* elements,
LOperand* key,
- LOperand* val) {
+ LOperand* value) {
inputs_[0] = elements;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
"store-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
@@ -1794,20 +1953,21 @@
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1815,22 +1975,21 @@
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key,
- LOperand* val) {
+ LOperand* value) {
inputs_[0] = external_pointer;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
"store-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1839,21 +1998,22 @@
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
+ temps_[1] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1866,11 +2026,11 @@
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
@@ -1882,11 +2042,11 @@
inputs_[1] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
@@ -1896,10 +2056,10 @@
inputs_[0] = char_code;
}
+ LOperand* char_code() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
};
@@ -1909,10 +2069,10 @@
inputs_[0] = string;
}
+ LOperand* string() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
};
@@ -1922,7 +2082,7 @@
inputs_[0] = value;
}
- LOperand* value() { return InputAt(0); }
+ LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
@@ -1935,6 +2095,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
@@ -1946,6 +2108,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
@@ -1953,11 +2117,14 @@
class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
+ LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
@@ -1972,6 +2139,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
@@ -1982,18 +2151,21 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampDToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
@@ -2001,8 +2173,8 @@
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -2013,12 +2185,13 @@
class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2026,11 +2199,14 @@
class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
public:
- LAllocateObject(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
+ LAllocateObject(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2079,6 +2255,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
@@ -2090,6 +2268,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2100,6 +2280,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
@@ -2115,6 +2297,8 @@
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
@@ -2122,15 +2306,15 @@
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LDeleteProperty(LOperand* object, LOperand* key) {
+ inputs_[0] = object;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
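Every lithium-arm.h hunk above applies the same mechanical cleanup: the named operand accessors (value(), left(), right(), temp(), ...) move up to sit directly under the constructor and ahead of the DECLARE_* macros, the deprecated InputAt(i) calls become direct inputs_[i] reads, and temps are uniformly named temp/temp2/temp3. A minimal sketch of the resulting class shape, assuming the LTemplateInstruction<R, I, T> base supplies the inputs_ and temps_ arrays (LExampleAddI itself is hypothetical):

class LExampleAddI: public LTemplateInstruction<1, 2, 0> {
 public:
  LExampleAddI(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }

  // Accessors come first, reading the operand arrays directly.
  LOperand* left() { return inputs_[0]; }
  LOperand* right() { return inputs_[1]; }

  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
  DECLARE_HYDROGEN_ACCESSOR(Add)
};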
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index b53b5ea..e88e5be 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -4798,38 +4798,46 @@
}
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(ne, "Operand is a smi", at, Operand(zero_reg));
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(eq, "Operand is a smi", at, Operand(zero_reg));
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(eq, "Operand is a smi", at, Operand(zero_reg));
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
- pop(object);
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Assert(eq, message, src, Operand(at));
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ ASSERT(!src.is(at));
+ LoadRoot(at, root_value_index);
+ Check(eq, message, src, Operand(at));
+ }
}
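The AbortIf*-to-Assert* conversion above also moves the debug gating into the macro assembler: each Assert* body is wrapped in emit_debug_code(), and the old Assert calls become Check, which is assumed to trap unconditionally once emitted. Call sites can therefore drop their own FLAG_debug_code guards; a sketch of the intended usage (register choice illustrative):

// Before: the caller had to guard the debug-only check itself.
if (FLAG_debug_code) {
  __ AbortIfNotSmi(t0);
}

// After: the guard lives inside the helper, which emits nothing
// unless --debug-code is on.
__ AssertSmi(t0);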
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index dcbeb9f..ad3004a 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1335,17 +1335,18 @@
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 6195bf5..be96592 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void MaybeObject::Verify() {
Object* this_as_object;
@@ -55,18 +55,18 @@
if (p->IsHeapObject()) {
HeapObject::VerifyHeapPointer(p);
} else {
- ASSERT(p->IsSmi());
+ CHECK(p->IsSmi());
}
}
void Smi::SmiVerify() {
- ASSERT(IsSmi());
+ CHECK(IsSmi());
}
void Failure::FailureVerify() {
- ASSERT(IsFailure());
+ CHECK(IsFailure());
}
@@ -207,68 +207,68 @@
void HeapObject::VerifyHeapPointer(Object* p) {
- ASSERT(p->IsHeapObject());
- ASSERT(HEAP->Contains(HeapObject::cast(p)));
+ CHECK(p->IsHeapObject());
+ CHECK(HEAP->Contains(HeapObject::cast(p)));
}
void HeapNumber::HeapNumberVerify() {
- ASSERT(IsHeapNumber());
+ CHECK(IsHeapNumber());
}
void ByteArray::ByteArrayVerify() {
- ASSERT(IsByteArray());
+ CHECK(IsByteArray());
}
void FreeSpace::FreeSpaceVerify() {
- ASSERT(IsFreeSpace());
+ CHECK(IsFreeSpace());
}
void ExternalPixelArray::ExternalPixelArrayVerify() {
- ASSERT(IsExternalPixelArray());
+ CHECK(IsExternalPixelArray());
}
void ExternalByteArray::ExternalByteArrayVerify() {
- ASSERT(IsExternalByteArray());
+ CHECK(IsExternalByteArray());
}
void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
- ASSERT(IsExternalUnsignedByteArray());
+ CHECK(IsExternalUnsignedByteArray());
}
void ExternalShortArray::ExternalShortArrayVerify() {
- ASSERT(IsExternalShortArray());
+ CHECK(IsExternalShortArray());
}
void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
- ASSERT(IsExternalUnsignedShortArray());
+ CHECK(IsExternalUnsignedShortArray());
}
void ExternalIntArray::ExternalIntArrayVerify() {
- ASSERT(IsExternalIntArray());
+ CHECK(IsExternalIntArray());
}
void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
- ASSERT(IsExternalUnsignedIntArray());
+ CHECK(IsExternalUnsignedIntArray());
}
void ExternalFloatArray::ExternalFloatArrayVerify() {
- ASSERT(IsExternalFloatArray());
+ CHECK(IsExternalFloatArray());
}
void ExternalDoubleArray::ExternalDoubleArrayVerify() {
- ASSERT(IsExternalDoubleArray());
+ CHECK(IsExternalDoubleArray());
}
@@ -277,8 +277,8 @@
VerifyHeapPointer(elements());
if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
- ASSERT(this->elements()->IsFixedArray());
- ASSERT(this->elements()->length() >= 2);
+ CHECK(this->elements()->IsFixedArray());
+ CHECK_GE(this->elements()->length(), 2);
}
if (HasFastProperties()) {
@@ -286,25 +286,25 @@
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
- ASSERT_EQ((map()->has_fast_smi_or_object_elements() ||
+ CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT(map()->has_fast_object_elements() == HasFastObjectElements());
+ CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
}
void Map::MapVerify() {
- ASSERT(!HEAP->InNewSpace(this));
- ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
- ASSERT(instance_size() == kVariableSizeSentinel ||
+ CHECK(!HEAP->InNewSpace(this));
+ CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
+ CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
- ASSERT_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
+ CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
}
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
if (HasTransitionArray()) {
@@ -316,11 +316,11 @@
void Map::SharedMapVerify() {
MapVerify();
- ASSERT(is_shared());
- ASSERT(instance_descriptors()->IsEmpty());
- ASSERT_EQ(0, pre_allocated_property_fields());
- ASSERT_EQ(0, unused_property_fields());
- ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
+ CHECK(is_shared());
+ CHECK(instance_descriptors()->IsEmpty());
+ CHECK_EQ(0, pre_allocated_property_fields());
+ CHECK_EQ(0, unused_property_fields());
+ CHECK_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
visitor_id());
}
@@ -328,15 +328,15 @@
void CodeCache::CodeCacheVerify() {
VerifyHeapPointer(default_cache());
VerifyHeapPointer(normal_type_cache());
- ASSERT(default_cache()->IsFixedArray());
- ASSERT(normal_type_cache()->IsUndefined()
+ CHECK(default_cache()->IsFixedArray());
+ CHECK(normal_type_cache()->IsUndefined()
|| normal_type_cache()->IsCodeCacheHashTable());
}
void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
VerifyHeapPointer(cache());
- ASSERT(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
+ CHECK(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
}
@@ -368,7 +368,7 @@
for (int i = 0; i < length(); i++) {
if (!is_the_hole(i)) {
double value = get_scalar(i);
- ASSERT(!isnan(value) ||
+ CHECK(!isnan(value) ||
(BitCast<uint64_t>(value) ==
BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
@@ -555,14 +555,14 @@
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- ASSERT(number == HEAP->nan_value());
+ CHECK(number == HEAP->nan_value());
} else {
- ASSERT(number->IsSmi());
+ CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
// Hidden oddballs have negative smis.
const int kLeastHiddenOddballNumber = -4;
- ASSERT(value <= 1);
- ASSERT(value >= kLeastHiddenOddballNumber);
+ CHECK_LE(value, 1);
+ CHECK(value >= kLeastHiddenOddballNumber);
}
}
@@ -591,8 +591,8 @@
void JSArray::JSArrayVerify() {
JSObjectVerify();
- ASSERT(length()->IsNumber() || length()->IsUndefined());
- ASSERT(elements()->IsUndefined() ||
+ CHECK(length()->IsNumber() || length()->IsUndefined());
+ CHECK(elements()->IsUndefined() ||
elements()->IsFixedArray() ||
elements()->IsFixedDoubleArray());
}
@@ -602,7 +602,7 @@
CHECK(IsJSSet());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
}
@@ -610,7 +610,7 @@
CHECK(IsJSMap());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
}
@@ -618,17 +618,17 @@
CHECK(IsJSWeakMap());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
}
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
- ASSERT(data()->IsUndefined() || data()->IsFixedArray());
+ CHECK(data()->IsUndefined() || data()->IsFixedArray());
switch (TypeTag()) {
case JSRegExp::ATOM: {
FixedArray* arr = FixedArray::cast(data());
- ASSERT(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
+ CHECK(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
break;
}
case JSRegExp::IRREGEXP: {
@@ -639,26 +639,26 @@
// Smi : Not compiled yet (-1) or code prepared for flushing.
// JSObject: Compilation error.
// Code/ByteArray: Compiled code.
- ASSERT(ascii_data->IsSmi() ||
+ CHECK(ascii_data->IsSmi() ||
(is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- ASSERT(uc16_data->IsSmi() ||
+ CHECK(uc16_data->IsSmi() ||
(is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
- ASSERT(ascii_saved->IsSmi() || ascii_saved->IsString() ||
+ CHECK(ascii_saved->IsSmi() || ascii_saved->IsString() ||
ascii_saved->IsCode());
Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
- ASSERT(uc16_saved->IsSmi() || uc16_saved->IsString() ||
+ CHECK(uc16_saved->IsSmi() || uc16_saved->IsString() ||
uc16_saved->IsCode());
- ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
- ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
+ CHECK(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
+ CHECK(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
break;
}
default:
- ASSERT_EQ(JSRegExp::NOT_COMPILED, TypeTag());
- ASSERT(data()->IsUndefined());
+ CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
+ CHECK(data()->IsUndefined());
break;
}
}
@@ -667,7 +667,7 @@
void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
VerifyPointer(handler());
- ASSERT(hash()->IsSmi() || hash()->IsUndefined());
+ CHECK(hash()->IsSmi() || hash()->IsUndefined());
}
@@ -680,7 +680,7 @@
void Foreign::ForeignVerify() {
- ASSERT(IsForeign());
+ CHECK(IsForeign());
}
@@ -784,6 +784,47 @@
}
+void JSFunctionResultCache::JSFunctionResultCacheVerify() {
+ JSFunction::cast(get(kFactoryIndex))->Verify();
+
+ int size = Smi::cast(get(kCacheSizeIndex))->value();
+ CHECK(kEntriesIndex <= size);
+ CHECK(size <= length());
+ CHECK_EQ(0, size % kEntrySize);
+
+ int finger = Smi::cast(get(kFingerIndex))->value();
+ CHECK(kEntriesIndex <= finger);
+ CHECK((finger < size) || (finger == kEntriesIndex && finger == size));
+ CHECK_EQ(0, finger % kEntrySize);
+
+ if (FLAG_enable_slow_asserts) {
+ for (int i = kEntriesIndex; i < size; i++) {
+ CHECK(!get(i)->IsTheHole());
+ get(i)->Verify();
+ }
+ for (int i = size; i < length(); i++) {
+ CHECK(get(i)->IsTheHole());
+ get(i)->Verify();
+ }
+ }
+}
+
+
+void NormalizedMapCache::NormalizedMapCacheVerify() {
+ FixedArray::cast(this)->Verify();
+ if (FLAG_enable_slow_asserts) {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ if (e->IsMap()) {
+ Map::cast(e)->SharedMapVerify();
+ } else {
+ CHECK(e->IsUndefined());
+ }
+ }
+ }
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugInfo::DebugInfoVerify() {
CHECK(IsDebugInfo());
@@ -802,7 +843,9 @@
VerifyPointer(break_point_objects());
}
#endif // ENABLE_DEBUGGER_SUPPORT
+#endif // VERIFY_HEAP
+#ifdef DEBUG
void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_objects_++;
@@ -962,63 +1005,6 @@
}
-void JSFunctionResultCache::JSFunctionResultCacheVerify() {
- JSFunction::cast(get(kFactoryIndex))->Verify();
-
- int size = Smi::cast(get(kCacheSizeIndex))->value();
- ASSERT(kEntriesIndex <= size);
- ASSERT(size <= length());
- ASSERT_EQ(0, size % kEntrySize);
-
- int finger = Smi::cast(get(kFingerIndex))->value();
- ASSERT(kEntriesIndex <= finger);
- ASSERT((finger < size) || (finger == kEntriesIndex && finger == size));
- ASSERT_EQ(0, finger % kEntrySize);
-
- if (FLAG_enable_slow_asserts) {
- for (int i = kEntriesIndex; i < size; i++) {
- ASSERT(!get(i)->IsTheHole());
- get(i)->Verify();
- }
- for (int i = size; i < length(); i++) {
- ASSERT(get(i)->IsTheHole());
- get(i)->Verify();
- }
- }
-}
-
-
-void NormalizedMapCache::NormalizedMapCacheVerify() {
- FixedArray::cast(this)->Verify();
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- if (e->IsMap()) {
- Map::cast(e)->SharedMapVerify();
- } else {
- ASSERT(e->IsUndefined());
- }
- }
- }
-}
-
-
-void Map::ZapTransitions() {
- TransitionArray* transition_array = transitions();
- MemsetPointer(transition_array->data_start(),
- GetHeap()->the_hole_value(),
- transition_array->length());
-}
-
-
-void Map::ZapPrototypeTransitions() {
- FixedArray* proto_transitions = GetPrototypeTransitions();
- MemsetPointer(proto_transitions->data_start(),
- GetHeap()->the_hole_value(),
- proto_transitions->length());
-}
-
-
#endif // DEBUG
} } // namespace v8::internal
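The blanket ASSERT-to-CHECK rewrite in objects-debug.cc is what makes the verifier meaningful outside debug builds: ASSERT compiles away unless DEBUG is defined, while CHECK stays live in every build, so a VERIFY_HEAP-enabled release binary still fails fast on a corrupt heap. A simplified sketch of the distinction (the real macros in checks.h carry more detail):

#ifdef DEBUG
#define ASSERT(condition) CHECK(condition)   // debug builds: fatal check
#else
#define ASSERT(condition) ((void) 0)         // release builds: compiled out
#endif

// CHECK is unconditional in every build.
#define CHECK(condition)                                              \
  do {                                                                \
    if (!(condition))                                                 \
      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition);   \
  } while (0)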
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 9d81697..7e0ba9f 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -674,7 +674,7 @@
% JSFunctionResultCache::kEntrySize != 0) {
return false;
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<JSFunctionResultCache*>(this)->
JSFunctionResultCacheVerify();
@@ -689,7 +689,7 @@
if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
return false;
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
}
@@ -1110,13 +1110,13 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
- ASSERT(READ_FIELD(this, offset)->IsSmi());
+ CHECK(READ_FIELD(this, offset)->IsSmi());
}
#endif
@@ -3628,14 +3628,11 @@
void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
Object* back_pointer = GetBackPointer();
-#ifdef DEBUG
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- if (object->IsTransitionArray()) {
+
+ if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
ZapTransitions();
- } else {
- ASSERT(object->IsMap() || object->IsUndefined());
}
-#endif
+
WRITE_FIELD(this, kTransitionsOrBackPointerOffset, back_pointer);
CONDITIONAL_WRITE_BARRIER(
heap, this, kTransitionsOrBackPointerOffset, back_pointer, mode);
@@ -3757,12 +3754,11 @@
void Map::set_transitions(TransitionArray* transition_array,
WriteBarrierMode mode) {
-#ifdef DEBUG
- if (HasTransitionArray()) {
- ASSERT(transitions() != transition_array);
+ // In release mode, only run this code if verify_heap is on.
+ if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
+ CHECK(transitions() != transition_array);
ZapTransitions();
}
-#endif
WRITE_FIELD(this, kTransitionsOrBackPointerOffset, transition_array);
CONDITIONAL_WRITE_BARRIER(
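Both objects-inl.h hunks above trade an #ifdef DEBUG block for a runtime test, so transition zapping can also run in release builds when heap verification is on. A sketch of what Heap::ShouldZapGarbage() presumably reduces to, matching that intent (the real definition lives in heap.h):

static inline bool ShouldZapGarbage() {
#ifdef DEBUG
  return true;                // debug builds always zap
#elif defined(VERIFY_HEAP)
  return FLAG_verify_heap;    // release builds: only when verifying the heap
#else
  return false;
#endif
}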
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 3d4f1da..b704c1f 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -138,9 +138,9 @@
table_.Register(kVisitCode, &VisitCode);
- // Registration for kVisitSharedFunctionInfo is done by StaticVisitor.
+ table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
- // Registration for kVisitJSFunction is done by StaticVisitor.
+ table_.Register(kVisitJSFunction, &VisitJSFunction);
// Registration for kVisitJSRegExp is done by StaticVisitor.
@@ -214,9 +214,8 @@
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_state() == MEGAMORPHIC || Serializer::enabled() ||
- heap->isolate()->context_exit_happened() ||
- target->ic_age() != heap->global_ic_age())) {
+ && (target->ic_state() == MEGAMORPHIC || heap->flush_monomorphic_ics() ||
+ Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
IC::Clear(rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
@@ -282,6 +281,71 @@
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+ if (shared->ic_age() != heap->global_ic_age()) {
+ shared->ResetForNewContext(heap->global_ic_age());
+ }
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ if (IsFlushable(heap, shared)) {
+ // This function's code looks flushable. But we have to postpone
+ // the decision until we see all functions that point to the same
+ // SharedFunctionInfo because some of them might be optimized.
+ // That would also make the non-optimized version of the code
+ // non-flushable, because it is required for bailing out from
+ // optimized code.
+ collector->code_flusher()->AddCandidate(shared);
+ // Treat the reference to the code object weakly.
+ VisitSharedFunctionInfoWeakCode(heap, object);
+ return;
+ }
+ }
+ VisitSharedFunctionInfoStrongCode(heap, object);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ JSFunction* function = JSFunction::cast(object);
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ if (IsFlushable(heap, function)) {
+ // This function's code looks flushable. But we have to postpone
+ // the decision until we see all functions that point to the same
+ // SharedFunctionInfo because some of them might be optimized.
+ // That would also make the non-optimized version of the code
+ // non-flushable, because it is required for bailing out from
+ // optimized code.
+ collector->code_flusher()->AddCandidate(function);
+ // Visit shared function info immediately to avoid double checking
+ // of its flushability later. This is just an optimization because
+ // the shared function info would eventually be visited.
+ SharedFunctionInfo* shared = function->unchecked_shared();
+ if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
+ StaticVisitor::MarkObject(heap, shared->map());
+ VisitSharedFunctionInfoWeakCode(heap, shared);
+ }
+ // Treat the reference to the code object weakly.
+ VisitJSFunctionWeakCode(heap, object);
+ return;
+ } else {
+ // Visit all unoptimized code objects to prevent flushing them.
+ StaticVisitor::MarkObject(heap, function->shared()->code());
+ if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+ MarkInlinedFunctionsCode(heap, function->code());
+ }
+ }
+ }
+ VisitJSFunctionStrongCode(heap, object);
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
Map* map, HeapObject* object) {
int last_property_offset =
@@ -353,6 +417,200 @@
}
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(
+ Heap* heap, Code* code) {
+ // For an optimized function we should retain both the non-optimized version
+ // of its code and the non-optimized versions of all inlined functions.
+ // This is required to support bailing out from inlined code.
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ FixedArray* literals = data->LiteralArray();
+ for (int i = 0, count = data->InlinedFunctionCount()->value();
+ i < count;
+ i++) {
+ JSFunction* inlined = JSFunction::cast(literals->get(i));
+ StaticVisitor::MarkObject(heap, inlined->shared()->code());
+ }
+}
+
+
+inline static bool IsValidNonBuiltinContext(Object* context) {
+ return context->IsContext() &&
+ !Context::cast(context)->global_object()->IsJSBuiltinsObject();
+}
+
+
+inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+ Object* undefined = heap->undefined_value();
+ return (info->script() != undefined) &&
+ (reinterpret_cast<Script*>(info->script())->source() != undefined);
+}
+
+
+template<typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
+ Heap* heap, JSFunction* function) {
+ SharedFunctionInfo* shared_info = function->unchecked_shared();
+
+ // Code is either on the stack, in the compilation cache, or referenced
+ // by an optimized version of the function.
+ MarkBit code_mark = Marking::MarkBitFrom(function->code());
+ if (code_mark.Get()) {
+ if (!Marking::MarkBitFrom(shared_info).Get()) {
+ shared_info->set_code_age(0);
+ }
+ return false;
+ }
+
+ // The function must have a valid context and not be a builtin.
+ if (!IsValidNonBuiltinContext(function->unchecked_context())) {
+ return false;
+ }
+
+ // We do not flush code for optimized functions.
+ if (function->code() != shared_info->code()) {
+ return false;
+ }
+
+ return IsFlushable(heap, shared_info);
+}
+
+
+template<typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
+ Heap* heap, SharedFunctionInfo* shared_info) {
+ // Code is either on the stack, in the compilation cache, or referenced
+ // by an optimized version of the function.
+ MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
+ if (code_mark.Get()) {
+ return false;
+ }
+
+ // The function must be compiled and have the source code available,
+ // to be able to recompile it in case we need the function again.
+ if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
+ return false;
+ }
+
+ // We never flush code for API functions.
+ Object* function_data = shared_info->function_data();
+ if (function_data->IsFunctionTemplateInfo()) {
+ return false;
+ }
+
+ // Only flush code for functions.
+ if (shared_info->code()->kind() != Code::FUNCTION) {
+ return false;
+ }
+
+ // Function must be lazy compilable.
+ if (!shared_info->allows_lazy_compilation()) {
+ return false;
+ }
+
+ // If this is a full script wrapped in a function, we do not flush the code.
+ if (shared_info->is_toplevel()) {
+ return false;
+ }
+
+ // TODO(mstarzinger): The following will soon be replaced by a new way of
+ // aging code that is based on an aging stub in the function prologue.
+
+ // How many collections a newly compiled code object will survive before
+ // being flushed.
+ static const int kCodeAgeThreshold = 5;
+
+ // Age this shared function info.
+ if (shared_info->code_age() < kCodeAgeThreshold) {
+ shared_info->set_code_age(shared_info->code_age() + 1);
+ return false;
+ }
+
+ return true;
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
+ Heap* heap, HeapObject* object) {
+ StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
+ Object** start_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::BodyDescriptor::kStartOffset);
+ Object** end_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::BodyDescriptor::kEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
+ Heap* heap, HeapObject* object) {
+ StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
+ Object** name_slot =
+ HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
+ StaticVisitor::VisitPointer(heap, name_slot);
+
+ // Skip visiting kCodeOffset as it is treated weakly here.
+ STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
+ SharedFunctionInfo::kCodeOffset);
+ STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+
+ Object** start_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+ Object** end_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::BodyDescriptor::kEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
+ Heap* heap, HeapObject* object) {
+ Object** start_slot =
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+ Object** end_slot =
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+ VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+ JSFunction::kPrototypeOrInitialMapOffset);
+
+ start_slot =
+ HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+ end_slot =
+ HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
+ Heap* heap, HeapObject* object) {
+ Object** start_slot =
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+ Object** end_slot =
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+ // Skip visiting kCodeEntryOffset as it is treated weakly here.
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+ JSFunction::kPrototypeOrInitialMapOffset);
+
+ start_slot =
+ HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+ end_slot =
+ HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
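The kCodeAgeThreshold test in IsFlushable above gives cold code a grace period rather than flushing it on first sight: each full GC in which the code goes unmarked ages the SharedFunctionInfo by one, and only past the threshold does IsFlushable answer true. A worked trace under those rules:

// code_age starts at 0 for freshly compiled code; threshold is 5.
// GC 1: 0 < 5 -> age to 1, keep     GC 4: 3 < 5 -> age to 4, keep
// GC 2: 1 < 5 -> age to 2, keep     GC 5: 4 < 5 -> age to 5, keep
// GC 3: 2 < 5 -> age to 3, keep     GC 6: 5 >= 5 -> flushable
// A GC that finds the code still in use resets code_age to 0 (see the
// JSFunction overload of IsFlushable above), restarting the countdown.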
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 7a3b3f4..26d1b12 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -397,9 +397,16 @@
// TODO(mstarzinger): This should be made protected once refactoring is done.
static inline void VisitNativeContext(Map* map, HeapObject* object);
+ // TODO(mstarzinger): This should be made protected once refactoring is done.
+ // Mark non-optimized code for functions inlined into the given optimized
+ // code. This will prevent it from being flushed.
+ static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
+
protected:
static inline void VisitMap(Map* map, HeapObject* object);
static inline void VisitCode(Map* map, HeapObject* object);
+ static inline void VisitSharedFunctionInfo(Map* map, HeapObject* object);
+ static inline void VisitJSFunction(Map* map, HeapObject* object);
static inline void VisitJSRegExp(Map* map, HeapObject* object);
// Mark pointers in a Map and its TransitionArray together, possibly
@@ -407,6 +414,17 @@
static void MarkMapContents(Heap* heap, Map* map);
static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
+ // Code flushing support.
+ static inline bool IsFlushable(Heap* heap, JSFunction* function);
+ static inline bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info);
+
+ // Helpers used by code flushing support that visit pointer fields and treat
+ // references to code objects either strongly or weakly.
+ static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
+ static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
+ static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
+ static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+
class DataObjectVisitor {
public:
template<int size>
diff --git a/src/objects.cc b/src/objects.cc
index aa59047..7dcefa2 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1806,9 +1806,6 @@
Map* map;
DescriptorArray* new_descriptors = new_map->instance_descriptors();
DescriptorArray* old_descriptors = old_map->instance_descriptors();
- if (old_descriptors->HasEnumCache()) {
- new_descriptors->CopyEnumCacheFrom(old_descriptors);
- }
for (Object* current = old_map;
!current->IsUndefined();
current = map->GetBackPointer()) {
@@ -1816,6 +1813,7 @@
if (!map->HasTransitionArray()) break;
TransitionArray* transitions = map->transitions();
if (transitions->descriptors() != old_descriptors) break;
+ map->SetEnumLength(Map::kInvalidEnumCache);
transitions->set_descriptors(new_descriptors);
}
old_map->set_owns_descriptors(false);
@@ -2166,12 +2164,8 @@
Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
- if (trim_mode == FROM_GC) {
-#ifdef DEBUG
- ZapEndOfFixedArray(new_end, to_trim);
-#endif
- } else {
- ZapEndOfFixedArray(new_end, to_trim);
+ if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ ZapEndOfFixedArray(new_end, to_trim);
}
int size_delta = to_trim * kPointerSize;
@@ -3217,10 +3211,12 @@
Object* result = get(index);
if (result->IsMap() &&
Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Map::cast(result)->SharedMapVerify();
}
+#endif
+#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ics which can be
@@ -4936,7 +4932,7 @@
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
result->set_dictionary_map(true);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap && result->is_shared()) {
result->SharedMapVerify();
}
@@ -6098,20 +6094,13 @@
Object* new_index_cache) {
ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
- if (HasEnumCache()) {
- ASSERT(new_cache->length() > GetEnumCache()->length());
- FixedArray::cast(get(kEnumCacheIndex))->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(get(kEnumCacheIndex))->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- } else {
- ASSERT(!IsEmpty());
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- set(kEnumCacheIndex, bridge_storage);
- }
+ ASSERT(!IsEmpty());
+ ASSERT(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
+ FixedArray::cast(bridge_storage)->
+ set(kEnumCacheBridgeCacheIndex, new_cache);
+ FixedArray::cast(bridge_storage)->
+ set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
+ set(kEnumCacheIndex, bridge_storage);
}
@@ -9046,6 +9035,22 @@
}
+void Map::ZapTransitions() {
+ TransitionArray* transition_array = transitions();
+ MemsetPointer(transition_array->data_start(),
+ GetHeap()->the_hole_value(),
+ transition_array->length());
+}
+
+
+void Map::ZapPrototypeTransitions() {
+ FixedArray* proto_transitions = GetPrototypeTransitions();
+ MemsetPointer(proto_transitions->data_start(),
+ GetHeap()->the_hole_value(),
+ proto_transitions->length());
+}
+
+
MaybeObject* JSReceiver::SetPrototype(Object* value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
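ZapTransitions and ZapPrototypeTransitions move out of the DEBUG-only part of objects-debug.cc and into objects.cc above so that release builds can link them, with Heap::ShouldZapGarbage() deciding at the call sites whether they actually run. MemsetPointer is assumed to be the plain pointer fill from utils.h, roughly:

// Overwrite every slot with the same value; the zap routines use it to
// fill dead transition arrays with the-hole so the heap verifier can
// distinguish stale entries from live references.
template <typename T>
inline void MemsetPointer(T** dest, T* value, int counter) {
  for (int i = 0; i < counter; i++) dest[i] = value;
}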
diff --git a/src/objects.h b/src/objects.h
index 63a3b1b..edc85de 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -731,6 +731,11 @@
// A template-ized version of the IsXXX functions.
template <class C> static inline bool Is(Object* obj);
+#ifdef VERIFY_HEAP
+#define DECLARE_VERIFIER(Name) void Name##Verify();
+#else
+#define DECLARE_VERIFIER(Name)
+#endif
class MaybeObject BASE_EMBEDDED {
public:
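The DECLARE_VERIFIER macro introduced above drives all of the later objects.h hunks: each class replaces its #ifdef'd XxxVerify() declaration with a single DECLARE_VERIFIER(Xxx) line. Its expansion follows directly from the definition:

// With VERIFY_HEAP defined:
//   DECLARE_VERIFIER(Map)   expands to   void MapVerify();
// Without VERIFY_HEAP the macro expands to nothing, so neither the
// declaration nor its out-of-line body in objects-debug.cc exists in
// builds compiled without heap verification.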
@@ -775,7 +780,7 @@
void Print(FILE* out);
void PrintLn(FILE* out);
#endif
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Verifies the object.
void Verify();
#endif
@@ -982,7 +987,7 @@
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
static void VerifyPointer(Object* p);
#endif
@@ -1037,9 +1042,8 @@
}
void SmiPrint(FILE* out);
void SmiPrint(StringStream* accumulator);
-#ifdef DEBUG
- void SmiVerify();
-#endif
+
+ DECLARE_VERIFIER(Smi)
static const int kMinValue =
(static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
@@ -1110,9 +1114,8 @@
}
void FailurePrint(FILE* out);
void FailurePrint(StringStream* accumulator);
-#ifdef DEBUG
- void FailureVerify();
-#endif
+
+ DECLARE_VERIFIER(Failure)
private:
inline intptr_t value() const;
@@ -1243,9 +1246,8 @@
void HeapObjectPrint(FILE* out);
void PrintHeader(FILE* out, const char* id);
#endif
-
-#ifdef DEBUG
- void HeapObjectVerify();
+ DECLARE_VERIFIER(HeapObject)
+#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
@@ -1331,9 +1333,7 @@
}
void HeapNumberPrint(FILE* out);
void HeapNumberPrint(StringStream* accumulator);
-#ifdef DEBUG
- void HeapNumberVerify();
-#endif
+ DECLARE_VERIFIER(HeapNumber)
inline int get_exponent();
inline int get_sign();
@@ -2078,9 +2078,7 @@
}
void JSObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
inline void PrintProperties() {
PrintProperties(stdout);
@@ -2362,8 +2360,8 @@
}
void FixedArrayPrint(FILE* out);
#endif
+ DECLARE_VERIFIER(FixedArray)
#ifdef DEBUG
- void FixedArrayVerify();
// Checks if two FixedArrays have identical contents.
bool IsEqualTo(FixedArray* other);
#endif
@@ -2449,10 +2447,7 @@
}
void FixedDoubleArrayPrint(FILE* out);
#endif
-
-#ifdef DEBUG
- void FixedDoubleArrayVerify();
-#endif
+ DECLARE_VERIFIER(FixedDoubleArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
@@ -3387,9 +3382,7 @@
// Casting
static inline JSFunctionResultCache* cast(Object* obj);
-#ifdef DEBUG
- void JSFunctionResultCacheVerify();
-#endif
+ DECLARE_VERIFIER(JSFunctionResultCache)
};
@@ -3609,9 +3602,7 @@
// Casting
static inline NormalizedMapCache* cast(Object* obj);
-#ifdef DEBUG
- void NormalizedMapCacheVerify();
-#endif
+ DECLARE_VERIFIER(NormalizedMapCache)
};
@@ -3660,9 +3651,7 @@
}
void ByteArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ByteArrayVerify();
-#endif
+ DECLARE_VERIFIER(ByteArray)
// Layout description.
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
@@ -3696,9 +3685,7 @@
}
void FreeSpacePrint(FILE* out);
#endif
-#ifdef DEBUG
- void FreeSpaceVerify();
-#endif
+ DECLARE_VERIFIER(FreeSpace)
// Layout description.
// Size is smi tagged when it is stored.
@@ -3778,9 +3765,7 @@
}
void ExternalPixelArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalPixelArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalPixelArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
@@ -3807,9 +3792,7 @@
}
void ExternalByteArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalByteArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalByteArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
@@ -3836,9 +3819,7 @@
}
void ExternalUnsignedByteArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalUnsignedByteArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalUnsignedByteArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
@@ -3865,9 +3846,7 @@
}
void ExternalShortArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalShortArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalShortArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
@@ -3894,9 +3873,7 @@
}
void ExternalUnsignedShortArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalUnsignedShortArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalUnsignedShortArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
@@ -3923,9 +3900,7 @@
}
void ExternalIntArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalIntArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalIntArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
@@ -3952,9 +3927,7 @@
}
void ExternalUnsignedIntArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalUnsignedIntArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalUnsignedIntArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
@@ -3981,9 +3954,7 @@
}
void ExternalFloatArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalFloatArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalFloatArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
@@ -4010,9 +3981,7 @@
}
void ExternalDoubleArrayPrint(FILE* out);
#endif // OBJECT_PRINT
-#ifdef DEBUG
- void ExternalDoubleArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalDoubleArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
@@ -4535,9 +4504,8 @@
}
void CodePrint(FILE* out);
#endif
-#ifdef DEBUG
- void CodeVerify();
-#endif
+ DECLARE_VERIFIER(Code)
+
void ClearInlineCaches();
void ClearTypeFeedbackCells(Heap* heap);
@@ -5097,14 +5065,14 @@
Handle<Map> FindTransitionedMap(MapHandleList* candidates);
Map* FindTransitionedMap(MapList* candidates);
- // Zaps the contents of backing data structures in debug mode. Note that the
+ // Zaps the contents of backing data structures. Note that the
// heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
// holding weak references when incremental marking is used, because it also
// iterates over objects that are otherwise unreachable.
-#ifdef DEBUG
+ // In general we only want to call these functions in release mode when
+ // heap verification is turned on.
void ZapPrototypeTransitions();
void ZapTransitions();
-#endif
// Dispatched behavior.
#ifdef OBJECT_PRINT
@@ -5113,8 +5081,9 @@
}
void MapPrint(FILE* out);
#endif
-#ifdef DEBUG
- void MapVerify();
+ DECLARE_VERIFIER(Map)
+
+#ifdef VERIFY_HEAP
void SharedMapVerify();
#endif
@@ -5317,9 +5286,7 @@
}
void ScriptPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ScriptVerify();
-#endif
+ DECLARE_VERIFIER(Script)
static const int kSourceOffset = HeapObject::kHeaderSize;
static const int kNameOffset = kSourceOffset + kPointerSize;
@@ -5799,9 +5766,7 @@
}
void SharedFunctionInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void SharedFunctionInfoVerify();
-#endif
+ DECLARE_VERIFIER(SharedFunctionInfo)
void ResetForNewContext(int new_ic_age);
@@ -6035,9 +6000,7 @@
}
void JSModulePrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSModuleVerify();
-#endif
+ DECLARE_VERIFIER(JSModule)
// Layout description.
static const int kContextOffset = JSObject::kHeaderSize;
@@ -6198,9 +6161,7 @@
}
void JSFunctionPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSFunctionVerify();
-#endif
+ DECLARE_VERIFIER(JSFunction)
// Returns the number of allocated literals.
inline int NumberOfLiterals();
@@ -6259,9 +6220,7 @@
}
void JSGlobalProxyPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSGlobalProxyVerify();
-#endif
+ DECLARE_VERIFIER(JSGlobalProxy)
// Layout description.
static const int kNativeContextOffset = JSObject::kHeaderSize;
@@ -6339,9 +6298,7 @@
}
void JSGlobalObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSGlobalObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSGlobalObject)
// Layout description.
static const int kSize = GlobalObject::kHeaderSize;
@@ -6373,9 +6330,7 @@
}
void JSBuiltinsObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSBuiltinsObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSBuiltinsObject)
// Layout description. The size of the builtins object includes
// room for two pointers per runtime routine written in javascript
@@ -6416,9 +6371,7 @@
}
void JSValuePrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSValueVerify();
-#endif
+ DECLARE_VERIFIER(JSValue)
// Layout description.
static const int kValueOffset = JSObject::kHeaderSize;
@@ -6472,9 +6425,8 @@
}
void JSDatePrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSDateVerify();
-#endif
+ DECLARE_VERIFIER(JSDate)
+
// The order is important. It must be kept in sync with date macros
// in macros.py.
enum FieldIndex {
@@ -6570,9 +6522,7 @@
}
void JSMessageObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSMessageObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSMessageObject)
// Layout description.
static const int kTypeOffset = JSObject::kHeaderSize;
@@ -6661,9 +6611,7 @@
static inline JSRegExp* cast(Object* obj);
// Dispatched behavior.
-#ifdef DEBUG
- void JSRegExpVerify();
-#endif
+ DECLARE_VERIFIER(JSRegExp)
static const int kDataOffset = JSObject::kHeaderSize;
static const int kSize = kDataOffset + kPointerSize;
@@ -6819,9 +6767,7 @@
}
void CodeCachePrint(FILE* out);
#endif
-#ifdef DEBUG
- void CodeCacheVerify();
-#endif
+ DECLARE_VERIFIER(CodeCache)
static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
static const int kNormalTypeCacheOffset =
@@ -6910,9 +6856,7 @@
}
void PolymorphicCodeCachePrint(FILE* out);
#endif
-#ifdef DEBUG
- void PolymorphicCodeCacheVerify();
-#endif
+ DECLARE_VERIFIER(PolymorphicCodeCache)
static const int kCacheOffset = HeapObject::kHeaderSize;
static const int kSize = kCacheOffset + kPointerSize;
@@ -6965,9 +6909,7 @@
}
void TypeFeedbackInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void TypeFeedbackInfoVerify();
-#endif
+ DECLARE_VERIFIER(TypeFeedbackInfo)
static const int kStorage1Offset = HeapObject::kHeaderSize;
static const int kStorage2Offset = kStorage1Offset + kPointerSize;
@@ -7013,9 +6955,7 @@
}
void AliasedArgumentsEntryPrint(FILE* out);
#endif
-#ifdef DEBUG
- void AliasedArgumentsEntryVerify();
-#endif
+ DECLARE_VERIFIER(AliasedArgumentsEntry)
static const int kAliasedContextSlot = HeapObject::kHeaderSize;
static const int kSize = kAliasedContextSlot + kPointerSize;
@@ -7362,9 +7302,8 @@
char* ToAsciiArray();
#endif
-#ifdef DEBUG
- void StringVerify();
-#endif
+ DECLARE_VERIFIER(String)
+
inline bool IsFlat();
// Layout description.
@@ -7601,9 +7540,7 @@
unsigned* offset,
unsigned chars);
-#ifdef DEBUG
- void SeqAsciiStringVerify();
-#endif
+ DECLARE_VERIFIER(SeqAsciiString)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
@@ -7708,9 +7645,7 @@
typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
BodyDescriptor;
-#ifdef DEBUG
- void ConsStringVerify();
-#endif
+ DECLARE_VERIFIER(ConsString)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
@@ -7762,9 +7697,7 @@
kOffsetOffset + kPointerSize, kSize>
BodyDescriptor;
-#ifdef DEBUG
- void SlicedStringVerify();
-#endif
+ DECLARE_VERIFIER(SlicedString)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
@@ -7991,9 +7924,7 @@
static inline Oddball* cast(Object* obj);
// Dispatched behavior.
-#ifdef DEBUG
- void OddballVerify();
-#endif
+ DECLARE_VERIFIER(Oddball)
// Initialize the fields.
MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
@@ -8044,9 +7975,8 @@
return address() + kValueOffset;
}
-#ifdef DEBUG
- void JSGlobalPropertyCellVerify();
-#endif
+ DECLARE_VERIFIER(JSGlobalPropertyCell)
+
#ifdef OBJECT_PRINT
inline void JSGlobalPropertyCellPrint() {
JSGlobalPropertyCellPrint(stdout);
@@ -8149,9 +8079,7 @@
}
void JSProxyPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSProxyVerify();
-#endif
+ DECLARE_VERIFIER(JSProxy)
// Layout description. We add padding so that a proxy has the same
// size as a virgin JSObject. This is essential for becoming a JSObject
@@ -8192,9 +8120,7 @@
}
void JSFunctionProxyPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSFunctionProxyVerify();
-#endif
+ DECLARE_VERIFIER(JSFunctionProxy)
// Layout description.
static const int kCallTrapOffset = JSProxy::kPaddingOffset;
@@ -8229,9 +8155,7 @@
}
void JSSetPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSSetVerify();
-#endif
+ DECLARE_VERIFIER(JSSet)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kSize = kTableOffset + kPointerSize;
@@ -8256,9 +8180,7 @@
}
void JSMapPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSMapVerify();
-#endif
+ DECLARE_VERIFIER(JSMap)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kSize = kTableOffset + kPointerSize;
@@ -8286,9 +8208,7 @@
}
void JSWeakMapPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSWeakMapVerify();
-#endif
+ DECLARE_VERIFIER(JSWeakMap)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kNextOffset = kTableOffset + kPointerSize;
@@ -8323,9 +8243,7 @@
}
void ForeignPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ForeignVerify();
-#endif
+ DECLARE_VERIFIER(Foreign)
// Layout description.
@@ -8382,9 +8300,7 @@
}
void JSArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSArrayVerify();
-#endif
+ DECLARE_VERIFIER(JSArray)
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
@@ -8463,9 +8379,7 @@
}
void AccessorInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void AccessorInfoVerify();
-#endif
+ DECLARE_VERIFIER(AccessorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -8529,9 +8443,7 @@
#ifdef OBJECT_PRINT
void AccessorPairPrint(FILE* out = stdout);
#endif
-#ifdef DEBUG
- void AccessorPairVerify();
-#endif
+ DECLARE_VERIFIER(AccessorPair)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -8565,9 +8477,7 @@
}
void AccessCheckInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void AccessCheckInfoVerify();
-#endif
+ DECLARE_VERIFIER(AccessCheckInfo)
static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
@@ -8596,9 +8506,7 @@
}
void InterceptorInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void InterceptorInfoVerify();
-#endif
+ DECLARE_VERIFIER(InterceptorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -8626,9 +8534,7 @@
}
void CallHandlerInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void CallHandlerInfoVerify();
-#endif
+ DECLARE_VERIFIER(CallHandlerInfo)
static const int kCallbackOffset = HeapObject::kHeaderSize;
static const int kDataOffset = kCallbackOffset + kPointerSize;
@@ -8644,9 +8550,7 @@
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(property_list, Object)
-#ifdef DEBUG
- void TemplateInfoVerify();
-#endif
+ DECLARE_VERIFIER(TemplateInfo)
static const int kTagOffset = HeapObject::kHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
@@ -8689,9 +8593,7 @@
}
void FunctionTemplateInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void FunctionTemplateInfoVerify();
-#endif
+ DECLARE_VERIFIER(FunctionTemplateInfo)
static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
@@ -8738,9 +8640,7 @@
}
void ObjectTemplateInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ObjectTemplateInfoVerify();
-#endif
+ DECLARE_VERIFIER(ObjectTemplateInfo)
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
static const int kInternalFieldCountOffset =
@@ -8762,9 +8662,7 @@
}
void SignatureInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void SignatureInfoVerify();
-#endif
+ DECLARE_VERIFIER(SignatureInfo)
static const int kReceiverOffset = Struct::kHeaderSize;
static const int kArgsOffset = kReceiverOffset + kPointerSize;
@@ -8787,9 +8685,7 @@
}
void TypeSwitchInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void TypeSwitchInfoVerify();
-#endif
+ DECLARE_VERIFIER(TypeSwitchInfo)
static const int kTypesOffset = Struct::kHeaderSize;
static const int kSize = kTypesOffset + kPointerSize;
@@ -8839,9 +8735,7 @@
}
void DebugInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void DebugInfoVerify();
-#endif
+ DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
@@ -8897,9 +8791,7 @@
}
void BreakPointInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void BreakPointInfoVerify();
-#endif
+ DECLARE_VERIFIER(BreakPointInfo)
static const int kCodePositionIndex = Struct::kHeaderSize;
static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
@@ -8917,6 +8809,7 @@
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_ACCESSORS
+#undef DECLARE_VERIFIER
#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
V(kSymbolTable, "symbol_table", "(Symbols)") \
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 2cca470..f50d85c 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -3107,13 +3107,13 @@
// stable. It should follow TagGlobalObjects as that can allocate.
AssertNoAllocation no_alloc;
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
SetProgressTotal(1); // 1 pass.
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
diff --git a/src/runtime.cc b/src/runtime.cc
index 10ff145..356b6fc 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -11793,6 +11793,15 @@
scope_info,
function_context);
+ // Check if eval is blocked in the context and temporarily allow it
+ // for debugger.
+ Handle<Context> native_context = Handle<Context>(context->native_context());
+ bool eval_disabled =
+ native_context->allow_code_gen_from_strings()->IsFalse();
+ if (eval_disabled) {
+ native_context->set_allow_code_gen_from_strings(
+ isolate->heap()->true_value());
+ }
// Invoke the evaluation function and return the result.
Handle<Object> argv[] = { arguments, source };
Handle<Object> result =
@@ -11801,6 +11810,10 @@
ARRAY_SIZE(argv),
argv,
&has_pending_exception);
+ if (eval_disabled) {
+ native_context->set_allow_code_gen_from_strings(
+ isolate->heap()->false_value());
+ }
if (has_pending_exception) return Failure::Exception();
// Skip the global proxy as it has no properties and always delegates to the
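
The new code temporarily re-enables code generation from strings around the debugger's evaluation and then restores the previous state, so the debuggee's policy is unchanged afterwards. A self-contained sketch of that flip/restore shape, with hypothetical stand-ins (NativeContext and EvaluateForDebugger are not the V8 API):

    // Hypothetical stand-in for the native context holding the flag.
    struct NativeContext {
      bool allow_code_gen_from_strings;
    };

    // Force the flag on only if it was off, run the evaluation, and put
    // the flag back. Assumes `evaluate` does not throw, matching the
    // original flow, which reports failure via a pending exception.
    template <typename Fn>
    void EvaluateForDebugger(NativeContext* context, Fn evaluate) {
      const bool eval_disabled = !context->allow_code_gen_from_strings;
      if (eval_disabled) context->allow_code_gen_from_strings = true;
      evaluate();
      if (eval_disabled) context->allow_code_gen_from_strings = false;
    }
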
@@ -13020,7 +13033,7 @@
if (pending_exception) return Failure::Exception();
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
cache_handle->JSFunctionResultCacheVerify();
}
@@ -13051,7 +13064,7 @@
cache_handle->set(index + 1, *value);
cache_handle->set_finger_index(index);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
cache_handle->JSFunctionResultCacheVerify();
}
diff --git a/src/spaces.cc b/src/spaces.cc
index bc1d7b0..cc84180 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -497,6 +497,7 @@
VirtualMemory reservation;
Address area_start = NULL;
Address area_end = NULL;
+
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
OS::CommitPageSize()) + CodePageGuardSize();
@@ -529,10 +530,11 @@
size_executable_ += reservation.size();
}
-#ifdef DEBUG
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), body_size);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, CodePageGuardStartOffset());
+ ZapBlock(base + CodePageAreaStartOffset(), body_size);
+ }
+
area_start = base + CodePageAreaStartOffset();
area_end = area_start + body_size;
} else {
@@ -544,9 +546,9 @@
if (base == NULL) return NULL;
-#ifdef DEBUG
- ZapBlock(base, chunk_size);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, chunk_size);
+ }
area_start = base + Page::kObjectStartOffset;
area_end = base + chunk_size;
@@ -622,9 +624,11 @@
size_t size,
Executability executable) {
if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
-#ifdef DEBUG
- ZapBlock(start, size);
-#endif
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(start, size);
+ }
+
isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
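
All three hunks above replace an unconditional "#ifdef DEBUG" zap with a runtime predicate, so release binaries can zap too when heap verification is requested. A plausible sketch of that predicate, assuming debug builds always zap and release builds zap only under --verify-heap (the real definition lives in heap.h and is not part of this diff):

    // FLAG_verify_heap stands in for the flag set by --verify-heap.
    static bool FLAG_verify_heap = false;

    static inline bool ShouldZapGarbage() {
    #ifdef DEBUG
      return true;                 // debug builds always zap freed memory
    #else
    #ifdef VERIFY_HEAP
      return FLAG_verify_heap;     // release builds zap only when verifying
    #else
      return false;
    #endif
    #endif
    }
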
@@ -985,8 +989,7 @@
void PagedSpace::Print() { }
#endif
-
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
if (was_swept_conservatively_) return;
@@ -996,23 +999,23 @@
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
- ASSERT(page->owner() == this);
+ CHECK(page->owner() == this);
if (page == Page::FromAllocationTop(allocation_info_.top)) {
allocation_pointer_found_in_space = true;
}
- ASSERT(page->WasSweptPrecisely());
+ CHECK(page->WasSweptPrecisely());
HeapObjectIterator it(page, NULL);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- ASSERT(end_of_previous_object <= object->address());
+ CHECK(end_of_previous_object <= object->address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -1027,15 +1030,14 @@
black_size += size;
}
- ASSERT(object->address() + size <= top);
+ CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
- ASSERT_LE(black_size, page->LiveBytes());
+ CHECK_LE(black_size, page->LiveBytes());
}
- ASSERT(allocation_pointer_found_in_space);
+ CHECK(allocation_pointer_found_in_space);
}
-#endif
-
+#endif // VERIFY_HEAP
// -----------------------------------------------------------------------------
// NewSpace implementation
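
The wholesale ASSERT-to-CHECK conversion inside Verify matters because the two macros diverge in release builds: ASSERT compiles away outside DEBUG, while CHECK always evaluates its condition and aborts on failure, which is what verification needs now that it can run in release mode. A minimal sketch of the distinction (V8's real macros live in checks.h and carry richer diagnostics):

    #include <cstdio>
    #include <cstdlib>

    // Always active: the condition is evaluated in every build.
    #define CHECK(condition)                                  \
      do {                                                    \
        if (!(condition)) {                                   \
          fprintf(stderr, "CHECK failed: %s\n", #condition);  \
          abort();                                            \
        }                                                     \
      } while (false)

    // Active only in debug builds; release builds drop the check.
    #ifdef DEBUG
    #define ASSERT(condition) CHECK(condition)
    #else
    #define ASSERT(condition) ((void)0)
    #endif
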
@@ -1259,7 +1261,7 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
@@ -1308,8 +1310,8 @@
}
// Check semi-spaces.
- ASSERT_EQ(from_space_.id(), kFromSpace);
- ASSERT_EQ(to_space_.id(), kToSpace);
+ CHECK_EQ(from_space_.id(), kFromSpace);
+ CHECK_EQ(to_space_.id(), kToSpace);
from_space_.Verify();
to_space_.Verify();
}
@@ -1525,8 +1527,9 @@
#ifdef DEBUG
void SemiSpace::Print() { }
+#endif
-
+#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
NewSpacePage* page = anchor_.next_page();
@@ -1556,8 +1559,9 @@
page = page->next_page();
}
}
+#endif
-
+#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
// Addresses belong to same semi-space
NewSpacePage* page = NewSpacePage::FromLimit(start);
@@ -2551,25 +2555,27 @@
// -----------------------------------------------------------------------------
// MapSpace implementation
+// TODO(mvstanton): this is awkward: the compiler can't emit a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
-#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
- ASSERT(object->IsMap() || object->IsFreeSpace());
+ CHECK(object->IsMap() || object->IsFreeSpace());
}
-#endif
// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
+// TODO(mvstanton): this is awkward: the compiler can't emit a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
-#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
- ASSERT(object->IsJSGlobalPropertyCell() ||
+ CHECK(object->IsJSGlobalPropertyCell() ||
object->map() == heap()->two_pointer_filler_map());
}
-#endif
// -----------------------------------------------------------------------------
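
The two TODOs point at a real toolchain constraint: most C++ compilers emit a class's vtable in the translation unit that defines its first non-inline virtual member function (its "key function"). If every virtual member is inline or compiled out by the preprocessor, no object file anchors the vtable. A small illustration of why the out-of-line VerifyObject definitions stay unconditional, under that key-function assumption:

    struct Space {
      virtual ~Space() {}             // inline: not a key function
      virtual void VerifyObject() {}  // inline: not a key function
    };

    struct MapSpace : public Space {
      virtual void VerifyObject();    // defined out of line below
    };

    // This definition is MapSpace's key function, so the compiler emits
    // the MapSpace vtable in this translation unit. Hiding it behind
    // #ifdef VERIFY_HEAP would remove that anchor in non-verifying builds.
    void MapSpace::VerifyObject() {}
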
@@ -2679,12 +2685,13 @@
HeapObject* object = page->GetObject();
-#ifdef DEBUG
- // Make the object consistent so the heap can be vefified in OldSpaceStep.
- reinterpret_cast<Object**>(object->address())[0] =
- heap()->fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ // Make the object consistent so the heap can be verified in OldSpaceStep.
+ // We only need to do this in debug builds or if verify_heap is on.
+ reinterpret_cast<Object**>(object->address())[0] =
+ heap()->fixed_array_map();
+ reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+ }
heap()->incremental_marking()->OldSpaceStep(object_size);
return object;
@@ -2783,7 +2790,7 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
@@ -2794,18 +2801,18 @@
// object area start.
HeapObject* object = chunk->GetObject();
Page* page = Page::FromAddress(object->address());
- ASSERT(object->address() == page->area_start());
+ CHECK(object->address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space.
Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, and byte arrays in large object space.
- ASSERT(object->IsCode() || object->IsSeqString() ||
+ CHECK(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
object->IsFixedDoubleArray() || object->IsByteArray());
@@ -2824,15 +2831,17 @@
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(heap()->Contains(element_object));
- ASSERT(element_object->map()->IsMap());
+ CHECK(heap()->Contains(element_object));
+ CHECK(element_object->map()->IsMap());
}
}
}
}
}
+#endif
+#ifdef DEBUG
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
diff --git a/src/spaces.h b/src/spaces.h
index effe06b..7ed0977 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1574,19 +1574,21 @@
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
-#ifdef DEBUG
- // Print meta info and objects in this space.
- virtual void Print();
-
+#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
- // Reports statistics for the space
- void ReportStatistics();
-
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
+#endif
+
+#ifdef DEBUG
+ // Print meta info and objects in this space.
+ virtual void Print();
+
+ // Reports statistics for the space
+ void ReportStatistics();
// Report code object related statistics
void CollectCodeStatistics();
@@ -1934,9 +1936,12 @@
NewSpacePage* first_page() { return anchor_.next_page(); }
NewSpacePage* current_page() { return current_page_; }
+#ifdef VERIFY_HEAP
+ virtual void Verify();
+#endif
+
#ifdef DEBUG
virtual void Print();
- virtual void Verify();
// Validate a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
@@ -2261,9 +2266,12 @@
template <typename StringType>
inline void ShrinkStringAtAllocationBoundary(String* string, int len);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify();
+#endif
+
+#ifdef DEBUG
// Print the active semispace.
virtual void Print() { to_space_.Print(); }
#endif
@@ -2433,9 +2441,7 @@
}
protected:
-#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
-#endif
private:
static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
@@ -2471,9 +2477,7 @@
}
protected:
-#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
-#endif
public:
TRACK_MEMORY("CellSpace")
@@ -2552,8 +2556,11 @@
LargePage* first_page() { return first_page_; }
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
virtual void Verify();
+#endif
+
+#ifdef DEBUG
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 3852155..66488ae 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -372,7 +372,7 @@
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
// Do nothing.
}
@@ -415,7 +415,7 @@
void StoreBuffer::Verify() {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
VerifyPointers(heap_->old_pointer_space(),
&StoreBuffer::FindPointersToNewSpaceInRegion);
VerifyPointers(heap_->map_space(),
@@ -427,9 +427,11 @@
void StoreBuffer::GCEpilogue() {
during_gc_ = false;
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
}
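
GCEpilogue now guards verification twice: VERIFY_HEAP decides at compile time whether the code exists at all, and --verify-heap decides at run time whether it executes, so unflagged release runs pay nothing. A standalone sketch of the shape, with stand-in names:

    #ifdef VERIFY_HEAP
    static bool FLAG_verify_heap = false;  // stand-in for --verify-heap

    static void VerifyStoreBuffer() {
      // a CHECK-based walk over the store buffer would go here
    }
    #endif

    void GCEpilogue() {
    #ifdef VERIFY_HEAP
      if (FLAG_verify_heap) {
        VerifyStoreBuffer();  // only compiled in, and only run, on request
      }
    #endif
    }
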
diff --git a/src/store-buffer.h b/src/store-buffer.h
index 951a9ca..0ade8ce 100644
--- a/src/store-buffer.h
+++ b/src/store-buffer.h
@@ -195,7 +195,7 @@
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
void VerifyPointers(LargeObjectSpace* space);
#endif
diff --git a/src/version.cc b/src/version.cc
index 30bcb09..a9f2cb7 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 14
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0bad960..675d404 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1297,11 +1297,9 @@
&allocation_failed,
TAG_OBJECT);
// Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
@@ -2049,10 +2047,7 @@
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
- if (FLAG_debug_code) {
- // Second should be non-smi if we get here.
- __ AbortIfSmi(second);
- }
+ __ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, on_not_smis);
// Convert second to smi, if possible.
@@ -5929,8 +5924,7 @@
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
__ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
__ decl(r0);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 1949c31..475fb9d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -2580,7 +2580,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (generate_debug_code_) __ AbortIfSmi(rax);
+ __ AssertNotSmi(rax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2809,7 +2809,7 @@
__ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (generate_debug_code_) __ AbortIfNotSmi(rax);
+ __ AssertSmi(rax);
context()->Plug(rax);
}
@@ -3459,7 +3459,7 @@
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AbortIfNotString(rax);
+ __ AssertString(rax);
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 8547b09..c08ca7b 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1993,7 +1993,7 @@
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ AbortIfNotString(input);
+ __ AssertString(input);
__ movl(result, FieldOperand(input, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -2601,16 +2601,13 @@
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
Register result = ToRegister(instr->result());
-
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting the index from length accounts for one of them; the extra
+ // kPointerSize displacement in the operand accounts for the other.
if (instr->index()->IsRegister()) {
__ subl(length, ToRegister(instr->index()));
} else {
__ subl(length, ToOperand(instr->index()));
}
- DeoptimizeIf(below_equal, instr->environment());
-
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
__ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
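
The relocated comment is easier to follow with the arithmetic written out: after the subtraction, the emitted operand reads *(arguments + length * kPointerSize + kPointerSize). A worked stand-alone form of that address computation, assuming a 64-bit build (ArgumentAddress is an illustrative helper, not V8 code):

    #include <cstdint>

    const int kPointerSize = 8;  // assumption: 64-bit pointers

    // Mirror of the operand above: length - index leaves one extra
    // element's worth of offset, and the explicit kPointerSize
    // displacement adds the second, skipping the two words between the
    // frame pointer and the last argument.
    inline uintptr_t ArgumentAddress(uintptr_t arguments,
                                     int length, int index) {
      return arguments + (length - index) * kPointerSize + kPointerSize;
    }
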
@@ -3756,9 +3753,8 @@
instr->index());
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
- if (FLAG_debug_code &&
- !instr->hydrogen()->length()->representation().IsTagged()) {
- __ AbortIfNotZeroExtended(reg);
+ if (!instr->hydrogen()->length()->representation().IsTagged()) {
+ __ AssertZeroExtended(reg);
}
if (instr->index()->IsConstantOperand()) {
int constant_index =
@@ -3770,9 +3766,8 @@
}
} else {
Register reg2 = ToRegister(instr->index());
- if (FLAG_debug_code &&
- !instr->hydrogen()->index()->representation().IsTagged()) {
- __ AbortIfNotZeroExtended(reg2);
+ if (!instr->hydrogen()->index()->representation().IsTagged()) {
+ __ AssertZeroExtended(reg2);
}
__ cmpq(reg, reg2);
}
@@ -3991,9 +3986,7 @@
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(rax);
- }
+ __ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4208,9 +4201,7 @@
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr->environment());
} else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(input);
- }
+ __ AssertSmi(input);
}
__ SmiToInteger32(input, input);
}
@@ -4620,7 +4611,7 @@
__ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
- __ AbortIfSmi(map);
+ __ AssertNotSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
Immediate(instance_size >> kPointerSizeLog2));
__ Assert(equal, "Unexpected instance size");
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index dd3054f..add5676 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -2167,12 +2167,10 @@
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index e8b363e..49849d8 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -396,9 +396,7 @@
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
@@ -1115,18 +1113,14 @@
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- if (emit_debug_code()) {
- AbortIfNotSmi(smi1);
- AbortIfNotSmi(smi2);
- }
+ AssertSmi(smi1);
+ AssertSmi(smi2);
cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
Cmp(dst, src);
}
@@ -1143,27 +1137,21 @@
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
+ AssertSmi(dst);
+ AssertSmi(src);
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
+ AssertSmi(dst);
+ AssertSmi(src);
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
@@ -2971,61 +2959,75 @@
}
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok, Label::kNear);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, &ok, Label::kNear);
+ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, "Operand is not a number");
+ bind(&ok);
+ }
}
-void MacroAssembler::AbortIfSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(NegateCondition(is_smi), "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(NegateCondition(is_smi), "Operand is a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(const Operand& object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(const Operand& object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
- ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
- cmpq(kScratchRegister, int32_register);
- Assert(above_equal, "32 bit value in register is not zero-extended");
+void MacroAssembler::AssertZeroExtended(Register int32_register) {
+ if (emit_debug_code()) {
+ ASSERT(!int32_register.is(kScratchRegister));
+ movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+ cmpq(kScratchRegister, int32_register);
+ Check(above_equal, "32 bit value in register is not zero-extended");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- testb(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi and not a string");
+ push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ ASSERT(!src.is(kScratchRegister));
+ LoadRoot(kScratchRegister, root_value_index);
+ cmpq(src, kScratchRegister);
+ Check(equal, message);
+ }
}
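
The AbortIfNot*-to-Assert* renaming carries a structural change as well: the emit_debug_code() test moves from every call site into the helper itself, which is why the code-stub and codegen hunks above shrink to single unconditional lines. The shape of the refactoring, with stand-in types (Register and EmitSmiCheck are placeholders):

    struct Register { int code; };

    class MacroAssembler {
     public:
      bool emit_debug_code() const { return emit_debug_code_; }

      // After the refactoring a call site is just `AssertSmi(reg);`
      // with no surrounding `if (emit_debug_code())` guard.
      void AssertSmi(Register object) {
        if (emit_debug_code()) {
          EmitSmiCheck(object);  // placeholder for the real check code
        }
      }

     private:
      void EmitSmiCheck(Register) { /* emit test + Check(...) */ }
      bool emit_debug_code_ = false;
    };
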
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 89b7962..8c1c101 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -960,27 +960,28 @@
shl(reg, Immediate(kSmiShift));
}
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
- void AbortIfNotSmi(const Operand& object);
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
+ void AssertSmi(const Operand& object);
// Abort execution if a 64 bit register containing a 32 bit payload does not
- // have zeros in the top 32 bits.
- void AbortIfNotZeroExtended(Register reg);
+ // have zeros in the top 32 bits, enabled via --debug-code.
+ void AssertZeroExtended(Register reg);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// Exception handling