Version 3.11.10
Implemented heap profiler memory usage reporting.
Preserved the pending error message while executing a finally block in try..finally. (Chromium issue 129171)
Fixed EnsureCanContainElements to properly handle double values. (issue 2170)
Improved heuristics to keep objects in fast mode with inherited constructors.
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@11798 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
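
A minimal embedder-side sketch of the heap profiler memory reporting added below in src/api.cc (assumes a v8 3.11-era embedder build; error handling omitted and the snapshot title is illustrative):

#include <cstdio>
#include <v8.h>
#include <v8-profiler.h>

int main() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  // Take a snapshot so the profiler actually owns some memory.
  v8::HeapProfiler::TakeSnapshot(v8::String::New("example"));

  // New API in this commit: bytes used by the profiler machinery itself.
  printf("profiler memory: %u bytes\n",
         static_cast<unsigned>(v8::HeapProfiler::GetMemorySizeUsedByProfiler()));

  context.Dispose();
  return 0;
}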
diff --git a/src/api.cc b/src/api.cc
index 6f38ba7..0d88047 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -990,6 +990,12 @@
}
+Local<AccessorSignature> AccessorSignature::New(
+ Handle<FunctionTemplate> receiver) {
+ return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
+}
+
+
Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Handle<FunctionTemplate> types[1] = { type };
return TypeSwitch::New(1, types);
@@ -1057,7 +1063,8 @@
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
- v8::PropertyAttribute attributes) {
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
@@ -1069,6 +1076,9 @@
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+ if (!signature.IsEmpty()) {
+ obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
+ }
return obj;
}
@@ -1079,7 +1089,8 @@
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
- v8::PropertyAttribute attributes) {
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate,
"v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
@@ -1088,9 +1099,9 @@
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
- getter, setter, data,
- settings, attributes);
+ i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
+ settings, attributes,
+ signature);
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
if (list->IsUndefined()) {
list = NeanderArray().value();
@@ -1275,7 +1286,8 @@
AccessorSetter setter,
v8::Handle<Value> data,
AccessControl settings,
- PropertyAttribute attribute) {
+ PropertyAttribute attribute,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
ENTER_V8(isolate);
@@ -1289,7 +1301,8 @@
setter,
data,
settings,
- attribute);
+ attribute,
+ signature);
}
@@ -3079,9 +3092,10 @@
ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
- getter, setter, data,
- settings, attributes);
+ v8::Handle<AccessorSignature> signature;
+ i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
+ settings, attributes,
+ signature);
bool fast = Utils::OpenHandle(this)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
if (result.is_null() || result->IsUndefined()) return false;
@@ -6256,6 +6270,11 @@
}
+size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
+ return i::HeapProfiler::GetMemorySizeUsedByProfiler();
+}
+
+
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
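
The api.cc changes above expose AccessorSignature and thread it through MakeAccessorInfo so a template accessor can declare which receivers it accepts. A hedged usage sketch (XGetter, XSetter, and the property name are hypothetical):

static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> name,
                                     const v8::AccessorInfo& info) {
  return v8::Integer::New(42);
}

static void XSetter(v8::Local<v8::String> name,
                    v8::Local<v8::Value> value,
                    const v8::AccessorInfo& info) {
}

void InstallAccessor(v8::Handle<v8::FunctionTemplate> templ) {
  // Restrict the accessor to receivers created from templ; the signature is
  // stored on the AccessorInfo as the expected receiver type (see above).
  v8::Handle<v8::AccessorSignature> sig = v8::AccessorSignature::New(templ);
  templ->InstanceTemplate()->SetAccessor(
      v8::String::New("x"), XGetter, XSetter,
      v8::Handle<v8::Value>(), v8::DEFAULT, v8::None, sig);
}

Note that v8::Object::SetAccessor (the non-template path above) passes an empty signature, so per-object accessors remain unrestricted.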
diff --git a/src/api.h b/src/api.h
index 05e5e72..58e6a6e 100644
--- a/src/api.h
+++ b/src/api.h
@@ -200,6 +200,8 @@
v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
static inline Local<Signature> ToLocal(
v8::internal::Handle<v8::internal::SignatureInfo> obj);
+ static inline Local<AccessorSignature> AccessorSignatureToLocal(
+ v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
@@ -233,6 +235,8 @@
OpenHandle(const v8::Context* context);
static inline v8::internal::Handle<v8::internal::SignatureInfo>
OpenHandle(const v8::Signature* sig);
+ static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
+ OpenHandle(const v8::AccessorSignature* sig);
static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
OpenHandle(const v8::TypeSwitch* that);
static inline v8::internal::Handle<v8::internal::Foreign>
@@ -276,6 +280,7 @@
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
@@ -300,6 +305,7 @@
MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
MAKE_OPEN_HANDLE(Signature, SignatureInfo)
+MAKE_OPEN_HANDLE(AccessorSignature, FunctionTemplateInfo)
MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
MAKE_OPEN_HANDLE(Data, Object)
MAKE_OPEN_HANDLE(RegExp, JSRegExp)
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 2296490..761123f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -3737,9 +3737,13 @@
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
- masm->add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
+ {
+ // Prevent literal pool emission before return address.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ masm->add(lr, pc, Operand(4));
+ __ str(lr, MemOperand(sp, 0));
+ masm->Jump(r5);
+ }
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -3956,14 +3960,21 @@
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+
+ // Block literal pool emission whilst taking the position of the handler
+ // entry. This avoids making the assumption that literal pools are always
+ // emitted after an instruction is emitted, rather than before.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ }
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
@@ -4006,9 +4017,13 @@
// Branch and link to JSEntryTrampoline. We don't use the double underscore
// macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc.
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // inserting instructions here after we read the pc. We block literal pool
+ // emission for the same reason.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ mov(lr, Operand(pc));
+ masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -6812,6 +6827,10 @@
Register target) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
+
+ // Prevent literal pool emission during calculation of return address.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+
// Push return address (accessible to GC through exit frame pc).
// Note that using pc with str is deprecated.
Label start;
@@ -7172,8 +7191,13 @@
// forth between a compare instruction (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
+ {
+ // Block literal pool emission, as the position of these two instructions
+ // is assumed by the patching code.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+ }
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object_,
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 2a5887a..ff7c3c1 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -73,9 +73,6 @@
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
- // Don't use b(al, ...) as that might emit the constant pool right after the
- // branch. After patching when the branch is no longer unconditional
- // execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
@@ -90,6 +87,8 @@
}
void EmitPatchInfo() {
+ // Block literal pool emission whilst recording patch site information.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
@@ -344,6 +343,8 @@
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
+ // Block literal pools whilst emitting stack check code.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
if (FLAG_count_based_interrupts) {
@@ -808,10 +809,11 @@
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
+ : isolate()->factory()->undefined_value(),
+ zone());
break;
case Variable::PARAMETER:
@@ -867,12 +869,12 @@
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
+ globals_->Add(function, zone());
break;
}
@@ -926,8 +928,8 @@
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
@@ -1603,7 +1605,7 @@
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -4492,14 +4494,55 @@
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
STATIC_ASSERT(kSmiTag == 0);
__ add(r1, r1, Operand(r1)); // Convert to smi.
+
+ // Store result register while executing finally block.
+ __ push(r1);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ ldr(r1, MemOperand(ip));
+ __ push(r1);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ __ ldr(r1, MemOperand(ip));
+ __ push(r1);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ ldr(r1, MemOperand(ip));
__ push(r1);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
+ // Restore pending message from stack.
+ __ pop(r1);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ str(r1, MemOperand(ip));
+
+ __ pop(r1);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ __ str(r1, MemOperand(ip));
+
+ __ pop(r1);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ str(r1, MemOperand(ip));
+
// Restore result register from stack.
__ pop(r1);
+
// Uncook return address and return.
__ pop(result_register());
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
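
EnterFinallyBlock/ExitFinallyBlock above now spill and restore the isolate's pending message object, flag, and script around the finally body; this is the Chromium issue 129171 fix from the changelog. A sketch of the observable effect from the embedder side (v8 3.11-era API; the script text is illustrative):

v8::HandleScope scope;
v8::TryCatch try_catch;
v8::Script::Compile(v8::String::New(
    "try { throw new Error('boom'); } finally { var unrelated = 1; }"))->Run();
// Previously the finally block could clobber the pending message; after this
// change Message() still describes the original 'boom' exception.
v8::Handle<v8::Message> message = try_catch.Message();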
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 0f05453..283862c 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -411,9 +411,9 @@
: spill_slot_count_(0),
info_(info),
graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) {
}
@@ -427,9 +427,9 @@
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
+ return LStackSlot::Create(index, zone());
}
}
@@ -474,23 +474,23 @@
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
- instructions_.Add(gap);
+ instructions_.Add(gap, zone());
index = instructions_.length();
- instructions_.Add(instr);
+ instructions_.Add(instr, zone());
} else {
index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
+ instructions_.Add(instr, zone());
+ instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
+ pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
+ return LConstantOperand::Create(constant->id(), zone());
}
@@ -529,7 +529,8 @@
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+ GetGapAt(index)->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(from, to, zone());
}
@@ -762,7 +763,7 @@
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -1345,7 +1346,8 @@
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
- return constant_val->CopyToRepresentation(Representation::Integer32());
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
}
}
return NULL;
@@ -1361,7 +1363,7 @@
HConstant::cast(right)->HasInteger32Value() &&
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
return AssignEnvironment(DefineAsRegister(
- new LMathFloorOfDiv(dividend, divisor, remainder)));
+ new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
@@ -1658,7 +1660,8 @@
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result = new LDateField(object, FixedTemp(r1), instr->index());
+ LDateField* result =
+ new(zone()) LDateField(object, FixedTemp(r1), instr->index());
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1707,10 +1710,9 @@
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegisterAtStart(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
- if (!needs_check) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+ if (instr->value()->type().IsSmi()) {
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@@ -2170,7 +2172,8 @@
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister());
+ LAllocateObject* result =
+ new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 39894a4..869a80a 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -333,8 +333,10 @@
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -2267,9 +2269,11 @@
}
void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
+ inlined_closures_.Add(closure, zone());
}
+ Zone* zone() const { return graph_->zone(); }
+
private:
int spill_slot_count_;
CompilationInfo* info_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 1d87c6a..256d180 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -571,6 +571,9 @@
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
+ // Block literal pool emission to ensure the nop indicating no inlined smi
+ // code is in the correct position.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
@@ -639,7 +642,7 @@
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
@@ -671,7 +674,7 @@
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry));
+ deopt_jump_table_.Add(JumpTableEntry(entry), zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
@@ -716,7 +719,7 @@
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -764,12 +767,12 @@
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ safepoint.DefinePointerRegister(cp, zone());
}
}
@@ -781,7 +784,7 @@
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -1191,7 +1194,7 @@
// Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new DeferredDivI(this, instr);
+ DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
__ TrySmiTag(right, &deoptimize, scratch);
@@ -1685,6 +1688,9 @@
ASSERT(ToRegister(instr->result()).is(r0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ // Block literal pool emission to ensure the nop indicating no inlined smi
+ // code is in the correct position.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -2297,7 +2303,7 @@
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
@@ -2316,20 +2322,25 @@
Label cache_miss;
Register map = temp;
__ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ {
+ // Block constant pool emission to ensure the positions of instructions are
+ // as expected by the patcher. See InstanceofStub::Generate().
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ Handle<JSGlobalPropertyCell> cell =
+ factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ cmp(map, Operand(ip));
+ __ b(ne, &cache_miss);
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ mov(result, Operand(factory()->the_hole_value()));
+ }
__ b(&done);
// The inlined call site cache did not match. Check null and string before
@@ -2560,12 +2571,12 @@
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsFound() && lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2577,9 +2588,23 @@
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ HeapObject* current = HeapObject::cast((*type)->prototype());
+ Heap* heap = type->GetHeap();
+ while (current != heap->null_value()) {
+ Handle<HeapObject> link(current);
+ __ LoadHeapObject(result, link);
+ __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map())));
+ DeoptimizeIf(ne, env);
+ current = HeapObject::cast(current->map()->prototype());
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
@@ -2587,7 +2612,7 @@
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
+ Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
@@ -2598,18 +2623,24 @@
}
Handle<String> name = instr->hydrogen()->name();
Label done;
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(scratch, Operand(map));
+ Label check_passed;
+ __ CompareMap(
+ object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
__ b(ne, &next);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
__ b(&done);
__ bind(&next);
}
@@ -2753,9 +2784,14 @@
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ tst(result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr->environment());
+ }
}
}
@@ -3258,7 +3294,7 @@
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
@@ -3435,7 +3471,7 @@
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
@@ -3980,7 +4016,7 @@
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@@ -4035,7 +4071,7 @@
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -4109,7 +4145,7 @@
Register src = ToRegister(instr->InputAt(0));
Register dst = ToRegister(instr->result());
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
@@ -4180,7 +4216,7 @@
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
@@ -4382,7 +4418,7 @@
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Optimistically untag the input.
// If the input is a HeapObject, SmiUntag will set the carry flag.
@@ -4642,7 +4678,8 @@
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
@@ -5152,6 +5189,8 @@
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ // Block literal pool emission for duration of padding.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
@@ -5246,7 +5285,7 @@
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 17fd09f..f35c69b 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -51,20 +51,20 @@
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- deopt_jump_table_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, zone),
+ deopt_jump_table_(4, zone),
+ deoptimization_literals_(8, zone),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(zone),
- deferred_(8),
+ deferred_(8, zone),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
+ zone_(zone),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- zone_(zone) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -181,7 +181,7 @@
void Abort(const char* format, ...);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -319,7 +319,8 @@
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -371,13 +372,13 @@
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+ Zone* zone_;
+
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
- Zone* zone_;
-
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index cefca47..c100720 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -36,7 +36,7 @@
static const Register kSavedValueRegister = { 9 };
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL) { }
@@ -79,7 +79,7 @@
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index b4aec54..7c49e9e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -2000,7 +2000,15 @@
Label* early_success,
CompareMapMode mode) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- cmp(scratch, Operand(map));
+ CompareMap(scratch, map, early_success, mode);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ cmp(obj_map, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
@@ -2008,10 +2016,10 @@
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
+ current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(current_map)));
+ cmp(obj_map, Operand(Handle<Map>(current_map)));
}
}
}
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index b93aba1..6b7d116 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -831,6 +831,13 @@
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
+ // As above, but the map of the object is already loaded into the given
+ // register; the generated code preserves it.
+ void CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index dde115a..66cdd84 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -119,8 +119,10 @@
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 50b4866..9bebb4d 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -45,7 +45,7 @@
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerARM();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index cde08a2..dd9de23 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -435,22 +435,59 @@
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
// r0 : value
Label exit;
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+ // We need an extra register: push name_reg and reuse it as a scratch.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
@@ -474,14 +511,14 @@
if (!transition.is_null()) {
// Update the map of the object.
- __ mov(scratch, Operand(transition));
- __ str(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ __ mov(scratch1, Operand(transition));
+ __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
- scratch,
+ scratch1,
name_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -508,15 +545,16 @@
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch, offset));
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ str(r0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(r0, &exit);
@@ -524,7 +562,7 @@
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -1269,8 +1307,9 @@
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -2568,7 +2607,13 @@
// -----------------------------------
Label miss;
- GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ r1, r2, r3, r4,
+ &miss);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -2623,6 +2668,51 @@
}
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ Handle<String> name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(r0);
+
+ // Call the JavaScript setter with the receiver and the value on the stack.
+ __ Push(r1, r0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(r0);
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> receiver,
Handle<String> name) {
@@ -2790,6 +2880,44 @@
}
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r0, &miss);
+ CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(r0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
@@ -3114,7 +3242,13 @@
// r3 is used as scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated.
- GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ r2, r1, r3, r4,
+ &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
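
CompileLoadViaGetter/CompileStoreViaSetter above give CALLBACKS properties backed by JavaScript functions their own monomorphic stubs: the stub checks the receiver's maps, then invokes the getter or setter via InvokeFunction inside an internal frame. A hedged sketch of script that would exercise them once the ICs warm up (script text illustrative):

v8::Script::Compile(v8::String::New(
    "var o = {};"
    "Object.defineProperty(o, 'p', {"
    "  get: function() { return 1; },"
    "  set: function(v) { this.backing = v; }"
    "});"
    "for (var i = 0; i < 100000; i++) { o.p; o.p = i; }"))->Run();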
diff --git a/src/assembler.cc b/src/assembler.cc
index be25649..d4c49dd 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -955,6 +955,24 @@
}
+ExternalReference ExternalReference::address_of_pending_message_obj(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_message_obj_address());
+}
+
+
+ExternalReference ExternalReference::address_of_has_pending_message(
+ Isolate* isolate) {
+ return ExternalReference(isolate->has_pending_message_address());
+}
+
+
+ExternalReference ExternalReference::address_of_pending_message_script(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_message_script_address());
+}
+
+
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
@@ -1133,6 +1151,12 @@
}
+ExternalReference ExternalReference::page_flags(Page* page) {
+ return ExternalReference(reinterpret_cast<Address>(page) +
+ MemoryChunk::kFlagsOffset);
+}
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
diff --git a/src/assembler.h b/src/assembler.h
index 05fe320..619c69c 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -640,6 +640,9 @@
static ExternalReference handle_scope_level_address();
static ExternalReference scheduled_exception_address(Isolate* isolate);
+ static ExternalReference address_of_pending_message_obj(Isolate* isolate);
+ static ExternalReference address_of_has_pending_message(Isolate* isolate);
+ static ExternalReference address_of_pending_message_script(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
@@ -656,6 +659,8 @@
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
+ static ExternalReference page_flags(Page* page);
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/ast.cc b/src/ast.cc
index 6f9fd7a..0970253 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -242,8 +242,11 @@
}
-void ObjectLiteral::CalculateEmitStore() {
- ZoneHashMap table(Literal::Match);
+void ObjectLiteral::CalculateEmitStore(Zone* zone) {
+ ZoneAllocationPolicy allocator(zone);
+
+ ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
+ allocator);
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
Literal* literal = property->key();
@@ -252,23 +255,23 @@
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED &&
- table.Lookup(literal, hash, false) != NULL) {
+ table.Lookup(literal, hash, false, allocator) != NULL) {
property->set_emit_store(false);
} else {
// Add key to the table.
- table.Lookup(literal, hash, true);
+ table.Lookup(literal, hash, true, allocator);
}
}
}
-void TargetCollector::AddTarget(Label* target) {
+void TargetCollector::AddTarget(Label* target, Zone* zone) {
// Add the label to the collector, but discard duplicates.
int length = targets_.length();
for (int i = 0; i < length; i++) {
if (targets_[i] == target) return;
}
- targets_.Add(target);
+ targets_.Add(target, zone);
}
@@ -397,7 +400,8 @@
// ----------------------------------------------------------------------------
// Recording of type feedback
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ Zone* zone) {
// Record type feedback from the oracle in the AST.
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
@@ -421,15 +425,17 @@
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this));
+ receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
+ zone);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism);
+ receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
}
}
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ Zone* zone) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
@@ -441,22 +447,23 @@
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
+ receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism);
+ receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
}
}
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ Zone* zone) {
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
receiver_types_.Clear();
if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
+ receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism);
+ receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
}
}
@@ -507,7 +514,6 @@
// We don't know the target.
return false;
case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Perhaps something interesting is up in the prototype chain...
@@ -784,7 +790,7 @@
// output formats are alike.
class RegExpUnparser: public RegExpVisitor {
public:
- RegExpUnparser();
+ explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
@@ -794,10 +800,11 @@
StringStream* stream() { return &stream_; }
HeapStringAllocator alloc_;
StringStream stream_;
+ Zone* zone_;
};
-RegExpUnparser::RegExpUnparser() : stream_(&alloc_) {
+RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) {
}
@@ -837,9 +844,9 @@
if (that->is_negated())
stream()->Add("^");
stream()->Add("[");
- for (int i = 0; i < that->ranges()->length(); i++) {
+ for (int i = 0; i < that->ranges(zone_)->length(); i++) {
if (i > 0) stream()->Add(" ");
- VisitCharacterRange(that->ranges()->at(i));
+ VisitCharacterRange(that->ranges(zone_)->at(i));
}
stream()->Add("]");
return NULL;
@@ -941,8 +948,8 @@
}
-SmartArrayPointer<const char> RegExpTree::ToString() {
- RegExpUnparser unparser;
+SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) {
+ RegExpUnparser unparser(zone);
Accept(&unparser, NULL);
return unparser.ToString();
}
diff --git a/src/ast.h b/src/ast.h
index 6417d9f..02ece7f 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -266,17 +266,17 @@
class SmallMapList {
public:
SmallMapList() {}
- explicit SmallMapList(int capacity) : list_(capacity) {}
+ SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
- void Reserve(int capacity) { list_.Reserve(capacity); }
+ void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); }
void Clear() { list_.Clear(); }
void Sort() { list_.Sort(); }
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
- void Add(Handle<Map> handle) {
- list_.Add(handle.location());
+ void Add(Handle<Map> handle, Zone* zone) {
+ list_.Add(handle.location(), zone);
}
Handle<Map> at(int i) const {
@@ -432,9 +432,10 @@
Block(Isolate* isolate,
ZoneStringList* labels,
int capacity,
- bool is_initializer_block)
+ bool is_initializer_block,
+ Zone* zone)
: BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity),
+ statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
scope_(NULL) {
}
@@ -597,7 +598,7 @@
Interface* interface() const { return interface_; }
protected:
- Module() : interface_(Interface::NewModule()) {}
+ explicit Module(Zone* zone) : interface_(Interface::NewModule(zone)) {}
explicit Module(Interface* interface) : interface_(interface) {}
private:
@@ -652,8 +653,9 @@
protected:
template<class> friend class AstNodeFactory;
- ModulePath(Module* module, Handle<String> name)
- : module_(module),
+ ModulePath(Module* module, Handle<String> name, Zone* zone)
+ : Module(zone),
+ module_(module),
name_(name) {
}
@@ -672,7 +674,8 @@
protected:
template<class> friend class AstNodeFactory;
- explicit ModuleUrl(Handle<String> url) : url_(url) {
+ ModuleUrl(Handle<String> url, Zone* zone)
+ : Module(zone), url_(url) {
}
private:
@@ -1100,12 +1103,12 @@
// stack in the compiler; this should probably be reworked.
class TargetCollector: public AstNode {
public:
- TargetCollector() : targets_(0) { }
+ explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
// references to something on the stack.
- void AddTarget(Label* target);
+ void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
@@ -1363,7 +1366,7 @@
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- void CalculateEmitStore();
+ void CalculateEmitStore(Zone* zone);
enum Flags {
kNoFlags = 0,
@@ -1528,7 +1531,7 @@
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
bool IsArrayLength() { return is_array_length_; }
@@ -1801,7 +1804,7 @@
virtual void MarkAsStatement() { is_prefix_ = true; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@@ -1954,7 +1957,7 @@
void mark_block_end() { block_end_ = true; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@@ -2213,8 +2216,8 @@
// Returns the interval of registers used for captures within this
// expression.
virtual Interval CaptureRegisters() { return Interval::Empty(); }
- virtual void AppendToText(RegExpText* text);
- SmartArrayPointer<const char> ToString();
+ virtual void AppendToText(RegExpText* text, Zone* zone);
+ SmartArrayPointer<const char> ToString(Zone* zone);
#define MAKE_ASTYPE(Name) \
virtual RegExp##Name* As##Name(); \
virtual bool Is##Name();
@@ -2299,7 +2302,7 @@
explicit CharacterSet(ZoneList<CharacterRange>* ranges)
: ranges_(ranges),
standard_set_type_(0) {}
- ZoneList<CharacterRange>* ranges();
+ ZoneList<CharacterRange>* ranges(Zone* zone);
uc16 standard_set_type() { return standard_set_type_; }
void set_standard_set_type(uc16 special_set_type) {
standard_set_type_ = special_set_type;
@@ -2330,11 +2333,11 @@
virtual bool IsTextElement() { return true; }
virtual int min_match() { return 1; }
virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text);
+ virtual void AppendToText(RegExpText* text, Zone* zone);
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version of is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
- bool is_standard();
+ bool is_standard(Zone* zone);
// Returns a value representing the standard character set if is_standard()
// returns true.
// Currently used values are:
@@ -2347,7 +2350,7 @@
// . : non-unicode non-newline
// * : All characters
uc16 standard_type() { return set_.standard_set_type(); }
- ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
+ ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
bool is_negated() { return is_negated_; }
private:
@@ -2367,7 +2370,7 @@
virtual bool IsTextElement() { return true; }
virtual int min_match() { return data_.length(); }
virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text);
+ virtual void AppendToText(RegExpText* text, Zone* zone);
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@@ -2377,7 +2380,7 @@
class RegExpText: public RegExpTree {
public:
- RegExpText() : elements_(2), length_(0) {}
+ explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@@ -2386,9 +2389,9 @@
virtual bool IsTextElement() { return true; }
virtual int min_match() { return length_; }
virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text);
- void AddElement(TextElement elm) {
- elements_.Add(elm);
+ virtual void AppendToText(RegExpText* text, Zone* zone);
+ void AddElement(TextElement elm, Zone* zone) {
+ elements_.Add(elm, zone);
length_ += elm.length();
}
ZoneList<TextElement>* elements() { return &elements_; }
@@ -2696,20 +2699,21 @@
}
ModulePath* NewModulePath(Module* origin, Handle<String> name) {
- ModulePath* module = new(zone_) ModulePath(origin, name);
+ ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
VISIT_AND_RETURN(ModulePath, module)
}
ModuleUrl* NewModuleUrl(Handle<String> url) {
- ModuleUrl* module = new(zone_) ModuleUrl(url);
+ ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
VISIT_AND_RETURN(ModuleUrl, module)
}
Block* NewBlock(ZoneStringList* labels,
int capacity,
- bool is_initializer_block) {
+ bool is_initializer_block,
+ Zone* zone) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block);
+ isolate_, labels, capacity, is_initializer_block, zone);
VISIT_AND_RETURN(Block, block)
}
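The dominant change in this patch is mechanical: every growing container takes an explicit Zone* at the call site instead of fetching a per-isolate default. A standalone sketch of the pattern with stand-in names (Arena and ArenaList are illustrative, not V8 classes):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-in for v8::internal::Zone: a bump-pointer arena with no
// per-object frees (everything dies when the arena dies).
class Arena {
 public:
  explicit Arena(size_t size)
      : buf_(static_cast<char*>(malloc(size))), top_(buf_) {}
  ~Arena() { free(buf_); }
  void* Allocate(size_t bytes) {       // no bounds check in this sketch
    void* result = top_;
    top_ += (bytes + 7) & ~size_t(7);  // keep 8-byte alignment
    return result;
  }
 private:
  char* buf_;
  char* top_;
};

// Like ZoneList after this patch: every growing call takes the arena
// explicitly rather than reading an isolate-wide default.
template <typename T>
class ArenaList {
 public:
  ArenaList(int capacity, Arena* arena)
      : data_(static_cast<T*>(arena->Allocate(capacity * sizeof(T)))),
        capacity_(capacity),
        length_(0) {}
  void Add(const T& value, Arena* arena) {
    if (length_ == capacity_) {  // grow in the caller's arena
      T* grown =
          static_cast<T*>(arena->Allocate(2 * capacity_ * sizeof(T)));
      for (int i = 0; i < length_; i++) grown[i] = data_[i];
      data_ = grown;
      capacity_ *= 2;
    }
    data_[length_++] = value;
  }
  int length() const { return length_; }
 private:
  T* data_;
  int capacity_;
  int length_;
};

int main() {
  Arena arena(1 << 16);
  ArenaList<int> list(2, &arena);
  for (int i = 0; i < 10; i++) list.Add(i, &arena);
  printf("%d elements\n", list.length());
}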
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 3805a30..33cbb81 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1632,9 +1632,10 @@
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
MaybeObject* maybe_map =
- array_function->initial_map()->CopyDropTransitions();
+ array_function->initial_map()->CopyDropTransitions(
+ DescriptorArray::MAY_BE_SHARED);
Map* new_map;
- if (!maybe_map->To<Map>(&new_map)) return false;
+ if (!maybe_map->To(&new_map)) return false;
new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
array_function->set_initial_map(new_map);
@@ -2191,7 +2192,6 @@
break;
}
case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Ignore non-properties.
diff --git a/src/compiler.cc b/src/compiler.cc
index e8094ed..d44718b 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -294,8 +294,9 @@
}
Handle<Context> global_context(info->closure()->context()->global_context());
- TypeFeedbackOracle oracle(code, global_context, info->isolate());
- HGraphBuilder builder(info, &oracle);
+ TypeFeedbackOracle oracle(code, global_context, info->isolate(),
+ info->isolate()->zone());
+ HGraphBuilder builder(info, &oracle, info->isolate()->zone());
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
if (info->isolate()->has_pending_exception()) {
@@ -346,7 +347,8 @@
// the compilation info is set if compilation succeeded.
bool succeeded = MakeCode(info);
if (!info->shared_info().is_null()) {
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
+ info->isolate()->zone());
info->shared_info()->set_scope_info(*scope_info);
}
return succeeded;
@@ -420,7 +422,7 @@
lit->name(),
lit->materialized_literal_count(),
info->code(),
- ScopeInfo::Create(info->scope()));
+ ScopeInfo::Create(info->scope(), info->isolate()->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@@ -462,7 +464,7 @@
script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
- live_edit_tracker.RecordFunctionInfo(result, lit);
+ live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
return result;
}
@@ -651,7 +653,8 @@
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->scope(), info->isolate()->zone());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!function.is_null()) {
@@ -728,7 +731,7 @@
} else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
(!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
ASSERT(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope());
+ scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
} else {
return Handle<SharedFunctionInfo>::null();
}
@@ -747,7 +750,7 @@
// the resulting function.
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal);
+ live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone());
return result;
}
diff --git a/src/d8.cc b/src/d8.cc
index 297bf79..7a01d55 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -443,8 +443,9 @@
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
- Local<Value> length = object->ToObject()->Get(String::New("byteLength"));
- V8::AdjustAmountOfExternalAllocatedMemory(-length->Uint32Value());
+ int32_t length =
+ object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
+ V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
object.Dispose();
}
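The fix above matters because byteLength comes back as an unsigned 32-bit value: negating it wraps before the widening conversion, so on 64-bit targets the external-memory adjustment was a huge positive number instead of a decrement. A standalone repro of the arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t byte_length = 100;  // what Uint32Value() hands back

  // Old shape of the code: negate the unsigned value, then widen.
  // Unsigned negation wraps, so on LP64 the result stays positive.
  intptr_t wrong = -byte_length;                     // 4294967196 on LP64

  // New shape: store into a signed type first, then negate.
  int32_t length = static_cast<int32_t>(byte_length);
  intptr_t right = -length;                          // -100 everywhere

  printf("wrong = %lld, right = %lld\n",
         static_cast<long long>(wrong), static_cast<long long>(right));
}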
@@ -1019,7 +1020,7 @@
Handle<Value> Shell::ReadBuffer(const Arguments& args) {
- STATIC_ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
+ ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 32f0f9e..a5c7143 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -148,6 +148,9 @@
} else {
// Garbage words are illegal if a number has been read.
if (has_read_number) return false;
+ // The first number has to be separated from garbage words by
+ // whitespace or other separators.
+ if (scanner.Peek().IsNumber()) return false;
}
} else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
// Parse UTC offset (only after UTC or time).
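A few illustrative inputs for the new separation rule (hypothetical strings, not taken from V8's test suite):

// "foo 2012"  -> still accepted: the garbage word "foo" is separated
//                from the first number by whitespace.
// "foo2012"   -> now rejected: Peek() after the garbage word sees a
//                number with no separator in between.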
diff --git a/src/debug.cc b/src/debug.cc
index 9efb5c3..543ce9f 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1451,7 +1451,7 @@
// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
+ thread_local_.last_fp_ = frame->UnpaddedFP();
} else {
// If there's restarter frame on top of the stack, just get the pointer
// to function which is going to be restarted.
@@ -1520,7 +1520,7 @@
// propagated on the next Debug::Break.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
+ thread_local_.last_fp_ = frame->UnpaddedFP();
}
// Step in or Step in min
@@ -1555,7 +1555,7 @@
// Continue if we are still on the same frame and in the same statement.
int current_statement_position =
break_location_iterator->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->fp() &&
+ return thread_local_.last_fp_ == frame->UnpaddedFP() &&
thread_local_.last_statement_position_ == current_statement_position;
}
@@ -1756,7 +1756,7 @@
void Debug::ActivateStepIn(StackFrame* frame) {
ASSERT(!StepOutActive());
- thread_local_.step_into_fp_ = frame->fp();
+ thread_local_.step_into_fp_ = frame->UnpaddedFP();
}
@@ -1767,7 +1767,7 @@
void Debug::ActivateStepOut(StackFrame* frame) {
ASSERT(!StepInActive());
- thread_local_.step_out_fp_ = frame->fp();
+ thread_local_.step_out_fp_ = frame->UnpaddedFP();
}
@@ -1784,20 +1784,19 @@
// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization
-// information. Deoptimization information is required in case that an
-// optimized version of this function is still activated on the
-// stack. It will also make sure that the full code is compiled with
-// the same flags as the previous version - that is flags which can
-// change the code generated. The current method of mapping from
-// already compiled full code without debug break slots to full code
-// with debug break slots depends on the generated code is otherwise
-// exactly the same.
-static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
+// have debug break slots and deoptimization information. Deoptimization
+// information is required in case an optimized version of this function
+// is still activated on the stack. It will also make sure that the full
+// code is compiled with the same flags as the previous version, that is,
+// flags which can change the code generated. The current method of
+// mapping from already compiled full code without debug break slots to
+// full code with debug break slots depends on the generated code being
+// otherwise exactly the same.
+static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
Handle<Code> current_code) {
ASSERT(!current_code->has_debug_break_slots());
- CompilationInfo info(shared);
+ CompilationInfo info(function);
info.MarkCompilingForDebugging(current_code);
ASSERT(!info.shared_info()->is_compiled());
ASSERT(!info.isolate()->has_pending_exception());
@@ -1809,7 +1808,7 @@
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
- Handle<Code> new_code(shared->code());
+ Handle<Code> new_code(function->shared()->code());
ASSERT(new_code->has_debug_break_slots());
ASSERT(current_code->is_compiled_optimizable() ==
new_code->is_compiled_optimizable());
@@ -2013,6 +2012,7 @@
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
+ Handle<SharedFunctionInfo> shared(function->shared());
if (function->code()->kind() == Code::FUNCTION &&
function->code()->has_debug_break_slots()) {
@@ -2020,7 +2020,6 @@
continue;
}
- Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() ||
@@ -2040,7 +2039,7 @@
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION);
- CompileFullCodeForDebugging(shared, current_code);
+ CompileFullCodeForDebugging(function, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
if (!shared->is_compiled()) {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 8f411a3..3debf55 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -354,6 +354,7 @@
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
+ has_alignment_padding_(0),
input_(NULL),
output_count_(0),
jsframe_count_(0),
@@ -378,6 +379,7 @@
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
+ function->shared()->increment_deopt_count();
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
@@ -593,12 +595,14 @@
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
function->PrintName();
- PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
+ PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ " took %0.3f ms]\n",
node_id,
output_[index]->GetPc(),
FullCodeGenerator::State2String(
static_cast<FullCodeGenerator::State>(
output_[index]->GetState()->value())),
+ has_alignment_padding_ ? "with padding" : "no padding",
ms);
}
}
@@ -769,7 +773,7 @@
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+ PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
input_offset);
@@ -789,7 +793,7 @@
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
+ PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
@@ -815,7 +819,7 @@
input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 6cf7143..9e8a549 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -221,6 +221,10 @@
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+ static int has_alignment_padding_offset() {
+ return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+ }
+
static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
@@ -322,6 +326,7 @@
BailoutType bailout_type_;
Address from_;
int fp_to_sp_delta_;
+ int has_alignment_padding_;
// Input frame description.
FrameDescription* input_;
@@ -595,7 +600,7 @@
void StoreArgumentsObject();
void MarkDuplicate();
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
static int NumberOfOperandsFor(Opcode opcode);
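has_alignment_padding_offset() follows the existing pattern of exporting a raw member offset so generated code can read the field directly off the object pointer. A standalone sketch of the idiom, using offsetof (V8's OFFSET_OF is essentially the same) and an illustrative struct layout:

#include <cstddef>
#include <cstdio>

struct Deopt {  // illustrative stand-in, not V8's Deoptimizer layout
  int bailout_type_;
  void* from_;
  int fp_to_sp_delta_;
  int has_alignment_padding_;
};

// Generated code receives the object base pointer plus this constant
// and can read the flag without any C++ accessor call.
static const size_t kPaddingOffset = offsetof(Deopt, has_alignment_padding_);

int main() {
  Deopt d = { 0, nullptr, 0, 1 };
  char* base = reinterpret_cast<char*>(&d);
  int padding = *reinterpret_cast<int*>(base + kPaddingOffset);
  printf("has_alignment_padding_ = %d\n", padding);
}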
diff --git a/src/elements-kind.h b/src/elements-kind.h
index ab31a33..3be7711 100644
--- a/src/elements-kind.h
+++ b/src/elements-kind.h
@@ -86,6 +86,17 @@
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+inline bool IsDictionaryElementsKind(ElementsKind kind) {
+ return kind == DICTIONARY_ELEMENTS;
+}
+
+
+inline bool IsExternalArrayElementsKind(ElementsKind kind) {
+ return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+}
+
+
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
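Both new predicates rely on the enum layout: dictionary is a single value and the external-array kinds form one contiguous run. A standalone sketch of the same technique over a hypothetical enum:

#include <cstdio>

// Hypothetical enum mirroring the shape of ElementsKind: the
// external-array kinds occupy one contiguous run of values.
enum Kind {
  FAST_SMI, FAST_OBJECT, DICTIONARY,
  FIRST_EXTERNAL, EXTERNAL_INT = FIRST_EXTERNAL,
  EXTERNAL_FLOAT, EXTERNAL_DOUBLE, LAST_EXTERNAL = EXTERNAL_DOUBLE
};

inline bool IsDictionaryKind(Kind k) { return k == DICTIONARY; }

inline bool IsExternalArrayKind(Kind k) {
  return k >= FIRST_EXTERNAL && k <= LAST_EXTERNAL;
}

int main() {
  printf("%d %d\n", IsExternalArrayKind(EXTERNAL_FLOAT),
         IsExternalArrayKind(FAST_SMI));
}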
diff --git a/src/elements.cc b/src/elements.cc
index 2692cb5..f0e1414 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -65,6 +65,9 @@
namespace internal {
+static const int kPackedSizeNotKnown = -1;
+
+
// First argument in list is the accessor class, the second argument is the
// accessor ElementsKind, and the third is the backing store class. Use the
// fast element handler for smi-only arrays. The implementation is currently
@@ -326,6 +329,76 @@
}
+static void CopySmiToDoubleElements(FixedArray* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
+ }
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return;
+ Object* the_hole = from->GetHeap()->the_hole_value();
+ for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
+ from_start < from_end; from_start++, to_start++) {
+ Object* hole_or_smi = from->get(from_start);
+ if (hole_or_smi == the_hole) {
+ to->set_the_hole(to_start);
+ } else {
+ to->set(to_start, Smi::cast(hole_or_smi)->value());
+ }
+ }
+}
+
+
+static void CopyPackedSmiToDoubleElements(FixedArray* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int packed_size,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ uint32_t to_end;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ to_end = to->length();
+ } else {
+ to_end = to_start + static_cast<uint32_t>(copy_size);
+ }
+ } else {
+ to_end = to_start + static_cast<uint32_t>(copy_size);
+ }
+ ASSERT(static_cast<int>(to_end) <= to->length());
+ ASSERT(packed_size >= 0 && packed_size <= copy_size);
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return;
+ for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
+ from_start < from_end; from_start++, to_start++) {
+ Object* smi = from->get(from_start);
+ ASSERT(!smi->IsTheHole());
+ to->set(to_start, Smi::cast(smi)->value());
+ }
+
+ while (to_start < to_end) {
+ to->set_the_hole(to_start++);
+ }
+}
+
+
static void CopyObjectToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedDoubleArray* to,
@@ -345,12 +418,14 @@
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- for (int i = 0; i < copy_size; i++) {
- Object* hole_or_object = from->get(i + from_start);
- if (hole_or_object->IsTheHole()) {
- to->set_the_hole(i + to_start);
+ Object* the_hole = from->GetHeap()->the_hole_value();
+ for (uint32_t from_end = from_start + copy_size;
+ from_start < from_end; from_start++, to_start++) {
+ Object* hole_or_object = from->get(from_start);
+ if (hole_or_object == the_hole) {
+ to->set_the_hole(to_start);
} else {
- to->set(i + to_start, hole_or_object->Number());
+ to->set(to_start, hole_or_object->Number());
}
}
}
@@ -527,6 +602,7 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
UNREACHABLE();
return NULL;
@@ -539,14 +615,27 @@
uint32_t to_start,
int copy_size,
FixedArrayBase* from) {
+ int packed_size = kPackedSizeNotKnown;
if (from == NULL) {
from = from_holder->elements();
}
+
+ if (from_holder) {
+ ElementsKind elements_kind = from_holder->GetElementsKind();
+ bool is_packed = IsFastPackedElementsKind(elements_kind) &&
+ from_holder->IsJSArray();
+ if (is_packed) {
+ packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
+ if (copy_size >= 0 && packed_size > copy_size) {
+ packed_size = copy_size;
+ }
+ }
+ }
if (from->length() == 0) {
return from;
}
return ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, to_kind, to_start, copy_size);
+ from, from_start, to, to_kind, to_start, packed_size, copy_size);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -849,15 +938,30 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
if (IsFastSmiOrObjectElementsKind(to_kind)) {
CopyObjectToObjectElements(
FixedArray::cast(from), KindTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
} else if (IsFastDoubleElementsKind(to_kind)) {
- CopyObjectToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
+ if (IsFastSmiElementsKind(KindTraits::Kind)) {
+ if (IsFastPackedElementsKind(KindTraits::Kind) &&
+ packed_size != kPackedSizeNotKnown) {
+ CopyPackedSmiToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start,
+ packed_size, copy_size);
+ } else {
+ CopySmiToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ }
+ } else {
+ CopyObjectToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ }
} else {
UNREACHABLE();
}
@@ -952,6 +1056,7 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ELEMENTS:
@@ -1265,6 +1370,7 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ELEMENTS:
@@ -1417,6 +1523,7 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
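CopyPackedSmiToDoubleElements above can skip the per-element hole check precisely because a packed JSArray guarantees no holes below its length; anything between packed_size and to_end is hole-filled afterwards. A standalone sketch of the two-phase copy, with NaN standing in for the hole marker:

#include <cmath>
#include <cstdio>
#include <vector>

// Stand-in for the hole marker in a double backing store.
static const double kHole = std::nan("");

// Copy packed_size ints without per-element hole checks, then fill the
// rest of the destination window with holes -- mirroring the split in
// CopyPackedSmiToDoubleElements.
void CopyPacked(const std::vector<int>& from, std::vector<double>* to,
                size_t packed_size, size_t to_end) {
  size_t i = 0;
  for (; i < packed_size; i++) (*to)[i] = from[i];  // no hole check
  for (; i < to_end; i++) (*to)[i] = kHole;         // hole-fill the tail
}

int main() {
  std::vector<int> from = {1, 2, 3};
  std::vector<double> to(6, 0.0);
  CopyPacked(from, &to, from.size(), to.size());
  for (double d : to) printf("%s ", std::isnan(d) ? "hole" : "num");
  printf("\n");
}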
diff --git a/src/factory.cc b/src/factory.cc
index cf4739b..28b318a 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -115,7 +115,8 @@
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors),
+ DescriptorArray::Allocate(number_of_descriptors,
+ DescriptorArray::MAY_BE_SHARED),
DescriptorArray);
}
@@ -496,7 +497,9 @@
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
+ CALL_HEAP_FUNCTION(isolate(),
+ src->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED),
+ Map);
}
@@ -939,7 +942,7 @@
Handle<String> key =
SymbolFromString(Handle<String>(String::cast(entry->name())));
// Check if a descriptor with this name already exists before writing.
- if (result->LinearSearch(*key, descriptor_count) ==
+ if (result->LinearSearch(EXPECT_UNSORTED, *key, descriptor_count) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
result->Set(descriptor_count, &desc, witness);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index fc9a1db..2b4c53c 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -132,6 +132,8 @@
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
+DEFINE_bool(es5_readonly, false,
+ "activate correct semantics for inheriting readonliness")
DEFINE_bool(es52_globals, false,
"activate new semantics for global var declarations")
@@ -196,7 +198,7 @@
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(array_bounds_checks_elimination, true,
+DEFINE_bool(array_bounds_checks_elimination, false,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, false,
"perform array index dehoisting")
diff --git a/src/frames.cc b/src/frames.cc
index e265341..b7e0286 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -469,6 +469,20 @@
}
+Address StackFrame::UnpaddedFP() const {
+#if defined(V8_TARGET_ARCH_IA32)
+ if (!is_optimized()) return fp();
+ int32_t alignment_state = Memory::int32_at(
+ fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
+
+ return (alignment_state == kAlignmentPaddingPushed) ?
+ (fp() + kPointerSize) : fp();
+#else
+ return fp();
+#endif
+}
+
+
Code* EntryFrame::unchecked_code() const {
return HEAP->raw_unchecked_js_entry_code();
}
@@ -1394,11 +1408,11 @@
STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
#undef DEFINE_WRAPPER
-static StackFrame* AllocateFrameCopy(StackFrame* frame) {
+static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: { \
field##_Wrapper* wrapper = \
- new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
+ new(zone) field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
return &wrapper->frame_; \
}
@@ -1410,11 +1424,11 @@
return NULL;
}
-Vector<StackFrame*> CreateStackMap() {
- ZoneList<StackFrame*> list(10);
+Vector<StackFrame*> CreateStackMap(Zone* zone) {
+ ZoneList<StackFrame*> list(10, zone);
for (StackFrameIterator it; !it.done(); it.Advance()) {
- StackFrame* frame = AllocateFrameCopy(it.frame());
- list.Add(frame);
+ StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
+ list.Add(frame, zone);
}
return list.ToVector();
}
diff --git a/src/frames.h b/src/frames.h
index 78cdd0c..2d45932 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -206,6 +206,11 @@
Address fp() const { return state_.fp; }
Address caller_sp() const { return GetCallerStackPointer(); }
+ // If this frame is optimized and was dynamically aligned, return its
+ // old unaligned frame pointer. When the frame is deoptimized, its FP
+ // will shift up one word and become unaligned.
+ Address UnpaddedFP() const;
+
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
@@ -888,7 +893,7 @@
// Reads all frames on the current stack and copies them into the current
// zone memory.
-Vector<StackFrame*> CreateStackMap();
+Vector<StackFrame*> CreateStackMap(Zone* zone);
} } // namespace v8::internal
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 9b1df4e..4da4e53 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -303,7 +303,7 @@
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
- FullCodeGenerator cgen(&masm, info);
+ FullCodeGenerator cgen(&masm, info, isolate->zone());
cgen.Generate();
if (cgen.HasStackOverflow()) {
ASSERT(!isolate->has_pending_exception());
@@ -440,14 +440,14 @@
}
}
#endif // DEBUG
- bailout_entries_.Add(entry);
+ bailout_entries_.Add(entry, zone());
}
void FullCodeGenerator::RecordTypeFeedbackCell(
unsigned id, Handle<JSGlobalPropertyCell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry);
+ type_feedback_cells_.Add(entry, zone());
}
@@ -456,7 +456,7 @@
// state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
- stack_checks_.Add(entry);
+ stack_checks_.Add(entry, zone());
}
@@ -570,7 +570,7 @@
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
ZoneList<Handle<Object> >* saved_globals = globals_;
- ZoneList<Handle<Object> > inner_globals(10);
+ ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
AstVisitor::VisitDeclarations(declarations);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 2a6b705..928de47 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -77,7 +77,8 @@
TOS_REG
};
- FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
+ FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
+ Zone* zone)
: masm_(masm),
info_(info),
scope_(info->scope()),
@@ -86,11 +87,12 @@
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0),
- stack_checks_(2), // There's always at least one.
+ ? info->function()->ast_node_count() : 0, zone),
+ stack_checks_(2, zone), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0),
- ic_total_count_(0) { }
+ ? info->function()->ast_node_count() : 0, zone),
+ ic_total_count_(0),
+ zone_(zone) { }
static bool MakeCode(CompilationInfo* info);
@@ -108,6 +110,8 @@
return NULL;
}
+ Zone* zone() const { return zone_; }
+
private:
class Breakable;
class Iteration;
@@ -236,7 +240,7 @@
// The finally block of a try/finally statement.
class Finally : public NestedStatement {
public:
- static const int kElementCount = 2;
+ static const int kElementCount = 5;
explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
virtual ~Finally() {}
@@ -786,6 +790,7 @@
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;
+ Zone* zone_;
friend class NestedStatement;
@@ -800,11 +805,12 @@
public:
explicit AccessorTable(Zone* zone) :
TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy>(Literal::Match),
+ ZoneAllocationPolicy>(Literal::Match,
+ ZoneAllocationPolicy(zone)),
zone_(zone) { }
Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true);
+ Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
return it;
}
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index 239358d..2dd0bbc 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -34,11 +34,12 @@
namespace v8 {
namespace internal {
-FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
+FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone)
: isolate_(isolate),
- entries_stack_(10),
- names_stack_(5),
- funcs_to_infer_(4) {
+ entries_stack_(10, zone),
+ names_stack_(5, zone),
+ funcs_to_infer_(4, zone),
+ zone_(zone) {
}
@@ -48,21 +49,21 @@
// and starts with a capital letter.
if (name->length() > 0 && Runtime::IsUpperCaseChar(
isolate()->runtime_state(), name->Get(0))) {
- names_stack_.Add(Name(name, kEnclosingConstructorName));
+ names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
}
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
- names_stack_.Add(Name(name, kLiteralName));
+ names_stack_.Add(Name(name, kLiteralName), zone());
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
- names_stack_.Add(Name(name, kVariableName));
+ names_stack_.Add(Name(name, kVariableName), zone());
}
}
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index ccd962a..f57e778 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -45,7 +45,7 @@
// a name.
class FuncNameInferrer : public ZoneObject {
public:
- explicit FuncNameInferrer(Isolate* isolate);
+ FuncNameInferrer(Isolate* isolate, Zone* zone);
// Returns whether we have entered name collection state.
bool IsOpen() const { return !entries_stack_.is_empty(); }
@@ -55,7 +55,7 @@
// Enters name collection state.
void Enter() {
- entries_stack_.Add(names_stack_.length());
+ entries_stack_.Add(names_stack_.length(), zone());
}
// Pushes an encountered name onto names stack when in collection state.
@@ -66,7 +66,7 @@
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
if (IsOpen()) {
- funcs_to_infer_.Add(func_to_infer);
+ funcs_to_infer_.Add(func_to_infer, zone());
}
}
@@ -105,6 +105,7 @@
};
Isolate* isolate() { return isolate_; }
+ Zone* zone() const { return zone_; }
// Constructs a full name in dotted notation from gathered names.
Handle<String> MakeNameFromStack();
@@ -119,6 +120,7 @@
ZoneList<int> entries_stack_;
ZoneList<Name> names_stack_;
ZoneList<FunctionLiteral*> funcs_to_infer_;
+ Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
};
diff --git a/src/hashmap.h b/src/hashmap.h
index e32ed16..6f76e9f 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -297,7 +297,7 @@
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
- Lookup(p->key, p->hash, true)->value = p->value;
+ Lookup(p->key, p->hash, true, allocator)->value = p->value;
n--;
}
}
@@ -349,8 +349,9 @@
Iterator begin() const { return Iterator(this, this->Start()); }
Iterator end() const { return Iterator(this, NULL); }
- Iterator find(Key* key, bool insert = false) {
- return Iterator(this, this->Lookup(key, key->Hash(), insert));
+ Iterator find(Key* key, bool insert = false,
+ AllocationPolicy allocator = AllocationPolicy()) {
+ return Iterator(this, this->Lookup(key, key->Hash(), insert, allocator));
}
};
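find() now forwards an explicit AllocationPolicy so an inserting lookup allocates wherever the caller decides, matching the zone-threading done elsewhere in this patch. A standalone sketch of a policy threaded through an insert path (names are illustrative):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Minimal allocation policy in the style of the hashmap's template
// parameter: stateless here, but a zone-backed policy would carry a
// Zone* just as ZoneAllocationPolicy does.
struct MallocPolicy {
  static void* New(size_t size) { return malloc(size); }
};

struct Entry { const char* key; int value; Entry* next; };

template <class AllocationPolicy>
struct TinyMap {
  Entry* head = nullptr;
  // Lookup that can insert; the policy is passed per call, mirroring
  // Lookup(key, hash, insert, allocator) after this patch.
  Entry* Lookup(const char* key, bool insert, AllocationPolicy allocator) {
    for (Entry* e = head; e != nullptr; e = e->next)
      if (strcmp(e->key, key) == 0) return e;
    if (!insert) return nullptr;
    Entry* e = static_cast<Entry*>(allocator.New(sizeof(Entry)));
    e->key = key; e->value = 0; e->next = head;
    head = e;
    return e;
  }
};

int main() {
  TinyMap<MallocPolicy> map;
  map.Lookup("a", true, MallocPolicy())->value = 42;
  printf("%d\n", map.Lookup("a", false, MallocPolicy())->value);
}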
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 975d6f4..301b099 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -168,6 +168,14 @@
}
+size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ size_t size = profiler->snapshots_->GetUsedMemorySize();
+ return size;
+}
+
+
int HeapProfiler::GetSnapshotsCount() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 4a81157..346177b 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -49,6 +49,8 @@
static void SetUp();
static void TearDown();
+ static size_t GetMemorySizeUsedByProfiler();
+
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control);
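A hedged embedder-side sketch of the new hook; this assumes the internal accessor added here is mirrored as a public static on v8::HeapProfiler in v8-profiler.h, as done for the other profiler entry points:

#include <cstdio>
#include "v8-profiler.h"

void ReportProfilerOverhead() {
  // Memory retained for snapshots taken so far; 0 if none were taken.
  size_t used = v8::HeapProfiler::GetMemorySizeUsedByProfiler();
  printf("heap profiler retains %zu bytes for snapshots\n", used);
}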
diff --git a/src/heap.cc b/src/heap.cc
index a224e2b..172405b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -66,21 +66,26 @@
: isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
- code_range_size_(0),
-#elif defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64)
#define LUMP_OF_MEMORY (2 * MB)
code_range_size_(512*MB),
#else
#define LUMP_OF_MEMORY MB
code_range_size_(0),
#endif
+#if defined(ANDROID)
+ reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ initial_semispace_size_(Page::kPageSize),
+ max_old_generation_size_(192*MB),
+ max_executable_size_(max_old_generation_size_),
+#else
reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
max_executable_size_(256l * LUMP_OF_MEMORY),
+#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@@ -3017,8 +3022,8 @@
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
- share->set_deopt_counter(FLAG_deopt_every_n_times);
- share->set_ic_age(0);
+ share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
+ share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -3666,7 +3671,8 @@
Map* new_map;
ASSERT(object_function->has_initial_map());
{ MaybeObject* maybe_map =
- object_function->initial_map()->CopyDropTransitions();
+ object_function->initial_map()->CopyDropTransitions(
+ DescriptorArray::MAY_BE_SHARED);
if (!maybe_map->To<Map>(&new_map)) return maybe_map;
}
Object* prototype;
@@ -3814,7 +3820,8 @@
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
- { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
+ { MaybeObject* maybe_descriptors_obj =
+ DescriptorArray::Allocate(count, DescriptorArray::MAY_BE_SHARED);
if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
return maybe_descriptors_obj;
}
diff --git a/src/heap.h b/src/heap.h
index 55e7135..dd1f710 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1597,7 +1597,7 @@
}
void AgeInlineCaches() {
- ++global_ic_age_;
+ global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
private:
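The masked increment keeps the global IC age within the bit width reserved for it in SharedFunctionInfo, so a wrapped age still round-trips through the bitfield instead of growing without bound. The idiom, standalone (the 3-bit width is chosen for illustration):

#include <cstdio>

// Suppose the age lives in a 3-bit field, so kMax = 7 and the counter
// must wrap at 8 to stay encodable (ICAgeBits::kMax plays this role).
static const int kAgeBits = 3;
static const int kMax = (1 << kAgeBits) - 1;

int main() {
  int age = 0;
  for (int i = 0; i < 10; i++) {
    age = (age + 1) & kMax;  // 1, 2, ..., 7, 0, 1, 2
    printf("%d ", age);
  }
  printf("\n");
}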
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 57a1862..4bb2509 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -336,7 +336,8 @@
// Do not reuse use list nodes in debug mode, zap them.
if (current != NULL) {
HUseListNode* temp =
- new HUseListNode(current->value(), current->index(), NULL);
+ new(block()->zone())
+ HUseListNode(current->value(), current->index(), NULL);
current->Zap();
current = temp;
}
@@ -495,8 +496,8 @@
if (new_value != NULL) {
if (removed == NULL) {
- new_value->use_list_ =
- new HUseListNode(this, index, new_value->use_list_);
+ new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
+ this, index, new_value->use_list_);
} else {
removed->set_tail(new_value->use_list_);
new_value->use_list_ = removed;
@@ -964,7 +965,7 @@
!HInstruction::cast(new_right)->IsLinked()) {
HInstruction::cast(new_right)->InsertBefore(this);
}
- HMathFloorOfDiv* instr = new HMathFloorOfDiv(context(),
+ HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
new_left,
new_right);
// Replace this HMathFloor instruction by the new HMathFloorOfDiv.
@@ -1251,7 +1252,7 @@
void HPhi::AddInput(HValue* value) {
- inputs_.Add(NULL);
+ inputs_.Add(NULL, value->block()->zone());
SetOperandAt(OperandCount() - 1, value);
// Mark phis that may have 'arguments' directly or indirectly as an operand.
if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
@@ -1298,14 +1299,33 @@
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->RequiredInputRepresentation(it.index());
+ Representation rep = value->ObservedInputRepresentation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
+ if (FLAG_trace_representation) {
+ PrintF("%d %s is used by %d %s as %s\n",
+ this->id(),
+ this->Mnemonic(),
+ value->id(),
+ value->Mnemonic(),
+ rep.Mnemonic());
+ }
}
}
}
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
+ if (FLAG_trace_representation) {
+ PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
+ this->id(),
+ this->Mnemonic(),
+ other->id(),
+ other->Mnemonic(),
+ other->non_phi_uses_[Representation::kInteger32],
+ other->non_phi_uses_[Representation::kDouble],
+ other->non_phi_uses_[Representation::kTagged]);
+ }
+
for (int i = 0; i < Representation::kNumRepresentations; i++) {
indirect_uses_[i] += other->non_phi_uses_[i];
}
@@ -1319,6 +1339,12 @@
}
+void HPhi::ResetInteger32Uses() {
+ non_phi_uses_[Representation::kInteger32] = 0;
+ indirect_uses_[Representation::kInteger32] = 0;
+}
+
+
void HSimulate::PrintDataTo(StringStream* stream) {
stream->Add("id=%d", ast_id());
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
@@ -1372,18 +1398,18 @@
}
-HConstant* HConstant::CopyToRepresentation(Representation r) const {
+HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
- return new HConstant(handle_, r);
+ return new(zone) HConstant(handle_, r);
}
-HConstant* HConstant::CopyToTruncatedInt32() const {
+HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
if (!has_double_value_) return NULL;
int32_t truncated = NumberToInt32(*handle_);
- return new HConstant(FACTORY->NewNumberFromInt(truncated),
- Representation::Integer32());
+ return new(zone) HConstant(FACTORY->NewNumberFromInt(truncated),
+ Representation::Integer32());
}
@@ -1592,18 +1618,51 @@
}
+// Returns true if an instance of this map can never find a property with this
+// name in its prototype chain. This means all prototypes up to the top are
+// fast and don't have the name in them. It would be good if we could optimize
+// polymorphic loads where the property is sometimes found in the prototype
+// chain.
+static bool PrototypeChainCanNeverResolve(
+ Handle<Map> map, Handle<String> name) {
+ Isolate* isolate = map->GetIsolate();
+ Object* current = map->prototype();
+ while (current != isolate->heap()->null_value()) {
+ if (current->IsJSGlobalProxy() ||
+ current->IsGlobalObject() ||
+ !current->IsJSObject() ||
+ JSObject::cast(current)->IsAccessCheckNeeded() ||
+ !JSObject::cast(current)->HasFastProperties()) {
+ return false;
+ }
+
+ LookupResult lookup(isolate);
+ JSObject::cast(current)->map()->LookupInDescriptors(NULL, *name, &lookup);
+ if (lookup.IsFound()) {
+ if (lookup.type() != MAP_TRANSITION) return false;
+ } else if (!lookup.IsCacheable()) {
+ return false;
+ }
+
+ current = JSObject::cast(current)->GetPrototype();
+ }
+ return true;
+}
+
+
HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
- Handle<String> name)
- : types_(Min(types->length(), kMaxLoadPolymorphism)),
+ Handle<String> name,
+ Zone* zone)
+ : types_(Min(types->length(), kMaxLoadPolymorphism), zone),
name_(name),
need_generic_(false) {
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnMaps);
- int map_transitions = 0;
+ SmallMapList negative_lookups;
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
@@ -1619,28 +1678,39 @@
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
- types_.Add(types->at(i));
+ types_.Add(types->at(i), zone);
break;
}
case CONSTANT_FUNCTION:
- types_.Add(types->at(i));
+ types_.Add(types->at(i), zone);
break;
case MAP_TRANSITION:
- // We should just ignore these since they are not relevant to a load
- // operation. This means we will deopt if we actually see this map
- // from optimized code.
- map_transitions++;
+ if (PrototypeChainCanNeverResolve(map, name)) {
+ negative_lookups.Add(types->at(i), zone);
+ }
break;
default:
break;
}
+ } else if (lookup.IsCacheable()) {
+ if (PrototypeChainCanNeverResolve(map, name)) {
+ negative_lookups.Add(types->at(i), zone);
+ }
}
}
- if (types_.length() + map_transitions == types->length() &&
- FLAG_deoptimize_uncommon_cases) {
+ bool need_generic =
+ (types->length() != negative_lookups.length() + types_.length());
+ if (!need_generic && FLAG_deoptimize_uncommon_cases) {
SetFlag(kUseGVN);
+ for (int i = 0; i < negative_lookups.length(); i++) {
+ types_.Add(negative_lookups.at(i), zone);
+ }
} else {
+ // We don't have an easy way to handle both a call (to the generic stub) and
+ // a deopt in the same hydrogen instruction, so in this case we don't add
+ // the negative lookups which can deopt - just let the generic stub handle
+ // them.
SetAllSideEffects();
need_generic_ = true;
}
@@ -1685,14 +1755,14 @@
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
- if (hole_check_mode_ == PERFORM_HOLE_CHECK) {
+ if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
}
bool HLoadKeyedFastElement::RequiresHoleCheck() {
- if (hole_check_mode_ == OMIT_HOLE_CHECK) {
+ if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
@@ -1738,12 +1808,11 @@
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
- key_load->key(),
- OMIT_HOLE_CHECK);
- HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
- object(), index);
+ key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
+ HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
+ object(), index);
load->InsertBefore(this);
return load;
}
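The negative-lookup walk above only trusts a guaranteed miss when every prototype is a fast-mode, access-check-free JSObject that lacks the name. A standalone sketch of the idea (plain structs, not V8's LookupResult machinery):

#include <cstdio>

struct Proto {
  bool fast_properties;
  bool access_check_needed;
  bool has_name;  // stand-in for the descriptor lookup
  Proto* prototype;
};

// A load can be treated as a guaranteed miss only if every object on
// the chain is "simple" and the name is absent everywhere.
bool ChainCanNeverResolve(Proto* start) {
  for (Proto* p = start; p != nullptr; p = p->prototype) {
    if (!p->fast_properties || p->access_check_needed) return false;
    if (p->has_name) return false;  // property might be found here
  }
  return true;  // miss guaranteed; safe to skip the generic stub
}

int main() {
  Proto object_proto = { true, false, false, nullptr };
  Proto array_proto = { true, false, false, &object_proto };
  printf("%d\n", ChainCanNeverResolve(&array_proto));
}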
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 0f6cb6e..780d57d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -720,6 +720,11 @@
return representation();
}
+ // Type feedback access.
+ virtual Representation ObservedInputRepresentation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
// instruction with a new one, first add the new instruction to the graph,
@@ -987,7 +992,8 @@
class HDeoptimize: public HControlInstruction {
public:
- explicit HDeoptimize(int environment_length) : values_(environment_length) { }
+ HDeoptimize(int environment_length, Zone* zone)
+ : values_(environment_length, zone) { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1006,8 +1012,8 @@
UNREACHABLE();
}
- void AddEnvironmentValue(HValue* value) {
- values_.Add(NULL);
+ void AddEnvironmentValue(HValue* value, Zone* zone) {
+ values_.Add(NULL, zone);
SetOperandAt(values_.length() - 1, value);
}
@@ -1275,11 +1281,12 @@
class HSimulate: public HInstruction {
public:
- HSimulate(int ast_id, int pop_count)
+ HSimulate(int ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id),
pop_count_(pop_count),
- values_(2),
- assigned_indexes_(2) {}
+ values_(2, zone),
+ assigned_indexes_(2, zone),
+ zone_(zone) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
@@ -1327,9 +1334,9 @@
private:
static const int kNoIndex = -1;
void AddValue(int index, HValue* value) {
- assigned_indexes_.Add(index);
+ assigned_indexes_.Add(index, zone_);
// Resize the list of pushed values.
- values_.Add(NULL);
+ values_.Add(NULL, zone_);
// Set the operand through the base method in HValue to make sure that the
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
@@ -1338,6 +1345,7 @@
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
+ Zone* zone_;
};
@@ -1840,7 +1848,9 @@
class HJSArrayLength: public HTemplateInstruction<2> {
public:
- HJSArrayLength(HValue* value, HValue* typecheck) {
+ HJSArrayLength(HValue* value, HValue* typecheck,
+ HType type = HType::Tagged()) {
+ set_type(type);
// The length of an array is stored as a tagged value in the array
// object. It is guaranteed to be 32 bit integer, but it can be
// represented as either a smi or heap number.
@@ -1864,7 +1874,7 @@
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other_raw) { return true; }
};
@@ -2055,7 +2065,8 @@
class HCheckMaps: public HTemplateInstruction<2> {
public:
- HCheckMaps(HValue* value, Handle<Map> map, HValue* typecheck = NULL) {
+ HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
+ HValue* typecheck = NULL) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
@@ -2064,9 +2075,9 @@
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
- map_set()->Add(map);
+ map_set()->Add(map, zone);
}
- HCheckMaps(HValue* value, SmallMapList* maps) {
+ HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
SetOperandAt(0, value);
SetOperandAt(1, value);
set_representation(Representation::Tagged());
@@ -2074,13 +2085,14 @@
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
for (int i = 0; i < maps->length(); i++) {
- map_set()->Add(maps->at(i));
+ map_set()->Add(maps->at(i), zone);
}
map_set()->Sort();
}
- static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map) {
- HCheckMaps* check_map = new HCheckMaps(object, map);
+ static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
+ Zone* zone) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
SmallMapList* map_set = check_map->map_set();
// Since transitioned elements maps of the initial map don't fail the map
@@ -2092,9 +2104,9 @@
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
Map* transitioned_map =
- map->LookupElementsTransitionMap(kind, NULL);
+ map->LookupElementsTransitionMap(kind);
if (transitioned_map) {
- map_set->Add(Handle<Map>(transitioned_map));
+ map_set->Add(Handle<Map>(transitioned_map), zone);
}
};
map_set->Sort();
@@ -2163,17 +2175,17 @@
class HCheckInstanceType: public HUnaryOperation {
public:
- static HCheckInstanceType* NewIsSpecObject(HValue* value) {
- return new HCheckInstanceType(value, IS_SPEC_OBJECT);
+ static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
}
- static HCheckInstanceType* NewIsJSArray(HValue* value) {
- return new HCheckInstanceType(value, IS_JS_ARRAY);
+ static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
}
- static HCheckInstanceType* NewIsString(HValue* value) {
- return new HCheckInstanceType(value, IS_STRING);
+ static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_STRING);
}
- static HCheckInstanceType* NewIsSymbol(HValue* value) {
- return new HCheckInstanceType(value, IS_SYMBOL);
+ static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_SYMBOL);
}
virtual void PrintDataTo(StringStream* stream);
@@ -2322,8 +2334,8 @@
class HPhi: public HValue {
public:
- explicit HPhi(int merged_index)
- : inputs_(2),
+ HPhi(int merged_index, Zone* zone)
+ : inputs_(2, zone),
merged_index_(merged_index),
phi_id_(-1),
is_live_(false),
@@ -2402,11 +2414,15 @@
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
- if (!OperandAt(i)->IsConvertibleToInteger()) return false;
+ if (!OperandAt(i)->IsConvertibleToInteger()) {
+ return false;
+ }
}
return true;
}
+ void ResetInteger32Uses();
+
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -2478,8 +2494,8 @@
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
- HConstant* CopyToRepresentation(Representation r) const;
- HConstant* CopyToTruncatedInt32() const;
+ HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
+ HConstant* CopyToTruncatedInt32(Zone* zone) const;
bool HasInteger32Value() const { return has_int32_value_; }
int32_t Integer32Value() const {
ASSERT(HasInteger32Value());
@@ -2556,6 +2572,7 @@
if (IsCommutative() && left()->IsConstant()) return right();
return left();
}
+
HValue* MostConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return left();
return right();
@@ -2721,6 +2738,9 @@
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
+ observed_input_representation_[0] = Representation::Tagged();
+ observed_input_representation_[1] = Representation::None();
+ observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2740,7 +2760,19 @@
virtual HType CalculateInferredType();
+ virtual Representation ObservedInputRepresentation(int index) {
+ return observed_input_representation_[index];
+ }
+
+ void InitializeObservedInputRepresentation(Representation r) {
+ observed_input_representation_[1] = r;
+ observed_input_representation_[2] = r;
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
+
+ private:
+ Representation observed_input_representation_[3];
};
@@ -3862,7 +3894,8 @@
HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
- Handle<String> name);
+ Handle<String> name,
+ Zone* zone);
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
@@ -3949,18 +3982,19 @@
virtual ~ArrayInstructionInterface() { };
};
-
-enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
-
class HLoadKeyedFastElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedFastElement(HValue* obj,
HValue* key,
- HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
- : hole_check_mode_(hole_check_mode),
- index_offset_(0),
- is_dehoisted_(false) {
+ ElementsKind elements_kind = FAST_ELEMENTS)
+ : bit_field_(0) {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+ bit_field_ = ElementsKindField::encode(elements_kind);
+ if (IsFastSmiElementsKind(elements_kind) &&
+ IsFastPackedElementsKind(elements_kind)) {
+ set_type(HType::Smi());
+ }
SetOperandAt(0, obj);
SetOperandAt(1, key);
set_representation(Representation::Tagged());
@@ -3970,12 +4004,19 @@
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
+ void SetIndexOffset(uint32_t index_offset) {
+ bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
+ }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
+ void SetDehoisted(bool is_dehoisted) {
+ bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
+ }
+ ElementsKind elements_kind() const {
+ return ElementsKindField::decode(bit_field_);
+ }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@@ -3994,18 +4035,22 @@
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastElement()) return false;
HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
- if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
+ if (IsDehoisted() && index_offset() != other_load->index_offset())
return false;
- return hole_check_mode_ == other_load->hole_check_mode_;
+ return elements_kind() == other_load->elements_kind();
}
private:
- HoleCheckMode hole_check_mode_;
- uint32_t index_offset_;
- bool is_dehoisted_;
+ class ElementsKindField: public BitField<ElementsKind, 0, 4> {};
+ class IndexOffsetField: public BitField<uint32_t, 4, 27> {};
+ class IsDehoistedField: public BitField<bool, 31, 1> {};
+ uint32_t bit_field_;
};
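The three BitField helpers pack an ElementsKind (4 bits), the dehoisting index offset (27 bits), and the dehoisted flag (1 bit) into one word, replacing three separate fields. A standalone re-derivation of the encode/decode/update arithmetic:

#include <cstdint>
#include <cstdio>

// Minimal reimplementation of the BitField<T, shift, size> idiom used
// by ElementsKindField / IndexOffsetField / IsDehoistedField above.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
  static uint32_t update(uint32_t bits, T value) {
    return (bits & ~kMask) | encode(value);
  }
};

typedef BitField<uint32_t, 0, 4>  KindField;       // 4 bits
typedef BitField<uint32_t, 4, 27> OffsetField;     // 27 bits
typedef BitField<bool, 31, 1>     DehoistedField;  // 1 bit

int main() {
  uint32_t bits = KindField::encode(5);
  bits = OffsetField::update(bits, 123456);
  bits = DehoistedField::update(bits, true);
  printf("kind=%u offset=%u dehoisted=%d\n",
         KindField::decode(bits), OffsetField::decode(bits),
         DehoistedField::decode(bits));
}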
+enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+
+
class HLoadKeyedFastDoubleElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
@@ -4013,15 +4058,15 @@
HValue* elements,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
- : index_offset_(0),
- is_dehoisted_(false),
- hole_check_mode_(hole_check_mode) {
- SetOperandAt(0, elements);
- SetOperandAt(1, key);
- set_representation(Representation::Double());
+ : index_offset_(0),
+ is_dehoisted_(false),
+ hole_check_mode_(hole_check_mode) {
+ SetOperandAt(0, elements);
+ SetOperandAt(1, key);
+ set_representation(Representation::Double());
SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
- }
+ }
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index e1070f0..61488af 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -55,19 +55,19 @@
HBasicBlock::HBasicBlock(HGraph* graph)
: block_id_(graph->GetNextBlockID()),
graph_(graph),
- phis_(4),
+ phis_(4, graph->zone()),
first_(NULL),
last_(NULL),
end_(NULL),
loop_information_(NULL),
- predecessors_(2),
+ predecessors_(2, graph->zone()),
dominator_(NULL),
- dominated_blocks_(4),
+ dominated_blocks_(4, graph->zone()),
last_environment_(NULL),
argument_count_(-1),
first_instruction_index_(-1),
last_instruction_index_(-1),
- deleted_phis_(4),
+ deleted_phis_(4, graph->zone()),
parent_loop_header_(NULL),
is_inline_return_target_(false),
is_deoptimizing_(false),
@@ -76,7 +76,7 @@
void HBasicBlock::AttachLoopInformation() {
ASSERT(!IsLoopHeader());
- loop_information_ = new(zone()) HLoopInformation(this);
+ loop_information_ = new(zone()) HLoopInformation(this, zone());
}
@@ -88,7 +88,7 @@
void HBasicBlock::AddPhi(HPhi* phi) {
ASSERT(!IsStartBlock());
- phis_.Add(phi);
+ phis_.Add(phi, zone());
phi->SetBlock(this);
}
@@ -119,13 +119,14 @@
HDeoptimize* HBasicBlock::CreateDeoptimize(
HDeoptimize::UseEnvironment has_uses) {
ASSERT(HasEnvironment());
- if (has_uses == HDeoptimize::kNoUses) return new(zone()) HDeoptimize(0);
+ if (has_uses == HDeoptimize::kNoUses)
+ return new(zone()) HDeoptimize(0, zone());
HEnvironment* environment = last_environment();
- HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
+ HDeoptimize* instr = new(zone()) HDeoptimize(environment->length(), zone());
for (int i = 0; i < environment->length(); i++) {
HValue* val = environment->values()->at(i);
- instr->AddEnvironmentValue(val);
+ instr->AddEnvironmentValue(val, zone());
}
return instr;
@@ -141,7 +142,7 @@
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count);
+ HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
for (int i = push_count - 1; i >= 0; --i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
@@ -278,7 +279,7 @@
SetInitialEnvironment(pred->last_environment()->Copy());
}
- predecessors_.Add(pred);
+ predecessors_.Add(pred, zone());
}
@@ -291,7 +292,7 @@
dominated_blocks_[index]->block_id() < block->block_id()) {
++index;
}
- dominated_blocks_.InsertAt(index, block);
+ dominated_blocks_.InsertAt(index, block, zone());
}
@@ -404,7 +405,7 @@
void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
- this->back_edges_.Add(block);
+ this->back_edges_.Add(block, block->zone());
AddBlock(block);
}
@@ -430,7 +431,7 @@
AddBlock(block->parent_loop_header());
} else {
block->set_parent_loop_header(loop_header());
- blocks_.Add(block);
+ blocks_.Add(block, block->zone());
for (int i = 0; i < block->predecessors()->length(); ++i) {
AddBlock(block->predecessors()->at(i));
}
@@ -451,8 +452,8 @@
int block_count,
HBasicBlock* dont_visit)
: visited_count_(0),
- stack_(16),
- reachable_(block_count, ZONE),
+ stack_(16, entry_block->zone()),
+ reachable_(block_count, entry_block->zone()),
dont_visit_(dont_visit) {
PushBlock(entry_block);
Analyze();
@@ -466,7 +467,7 @@
if (block != NULL && block != dont_visit_ &&
!reachable_.Contains(block->block_id())) {
reachable_.Add(block->block_id());
- stack_.Add(block);
+ stack_.Add(block, block->zone());
visited_count_++;
}
}
@@ -604,7 +605,8 @@
HGraphBuilder::HGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
+ TypeFeedbackOracle* oracle,
+ Zone* zone)
: function_state_(NULL),
initial_function_state_(this, info, oracle, NORMAL_RETURN),
ast_context_(NULL),
@@ -612,8 +614,8 @@
graph_(NULL),
current_block_(NULL),
inlined_count_(0),
- globals_(10),
- zone_(info->isolate()->zone()),
+ globals_(10, zone),
+ zone_(zone),
inline_bailout_(false) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
@@ -672,15 +674,17 @@
}
-HGraph::HGraph(CompilationInfo* info)
+HGraph::HGraph(CompilationInfo* info, Zone* zone)
: isolate_(info->isolate()),
next_block_id_(0),
entry_block_(NULL),
- blocks_(8),
- values_(16),
- phi_list_(NULL) {
+ blocks_(8, zone),
+ values_(16, zone),
+ phi_list_(NULL),
+ zone_(zone),
+ is_recursive_(false) {
start_environment_ =
- new(zone()) HEnvironment(NULL, info->scope(), info->closure());
+ new(zone) HEnvironment(NULL, info->scope(), info->closure(), zone);
start_environment_->set_ast_id(AstNode::kFunctionEntryId);
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
@@ -730,7 +734,7 @@
HBasicBlock* HGraph::CreateBasicBlock() {
HBasicBlock* result = new(zone()) HBasicBlock(this);
- blocks_.Add(result);
+ blocks_.Add(result, zone());
return result;
}
@@ -753,7 +757,7 @@
HPhase phase("H_Block ordering");
BitVector visited(blocks_.length(), zone());
- ZoneList<HBasicBlock*> reverse_result(8);
+ ZoneList<HBasicBlock*> reverse_result(8, zone());
HBasicBlock* start = blocks_[0];
Postorder(start, &visited, &reverse_result, NULL);
@@ -761,7 +765,7 @@
int index = 0;
for (int i = reverse_result.length() - 1; i >= 0; --i) {
HBasicBlock* b = reverse_result[i];
- blocks_.Add(b);
+ blocks_.Add(b, zone());
b->set_block_id(index++);
}
}
@@ -807,7 +811,7 @@
ASSERT(block->end()->SecondSuccessor() == NULL ||
order->Contains(block->end()->SecondSuccessor()) ||
block->end()->SecondSuccessor()->IsLoopHeader());
- order->Add(block);
+ order->Add(block, zone());
}
@@ -849,9 +853,9 @@
// Worklist of phis that can potentially be eliminated. Initialized with
// all phi nodes. When elimination of a phi node modifies another phi node
// the modified phi node is added to the worklist.
- ZoneList<HPhi*> worklist(blocks_.length());
+ ZoneList<HPhi*> worklist(blocks_.length(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
- worklist.AddAll(*blocks_[i]->phis());
+ worklist.AddAll(*blocks_[i]->phis(), zone());
}
while (!worklist.is_empty()) {
@@ -869,7 +873,7 @@
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
value->SetOperandAt(it.index(), replacement);
- if (value->IsPhi()) worklist.Add(HPhi::cast(value));
+ if (value->IsPhi()) worklist.Add(HPhi::cast(value), zone());
}
block->RemovePhi(phi);
}
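
The worklist discipline behind this hunk (re-queue any phi whose operands were rewritten, since it may have become redundant in turn) can be shown standalone. A minimal sketch with toy types; Node and Resolve are illustrations, not the real HPhi API:

    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    // Toy model of the redundant-phi worklist. A phi whose inputs all
    // resolve to a single other value is replaced by it, and each phi that
    // used the replaced one is re-queued, since the rewrite may have made
    // it redundant as well.
    struct Node {
      std::vector<int> inputs;  // Ids of input values.
      bool is_phi;
    };

    int Resolve(const std::map<int, int>& replacement, int id) {
      while (replacement.count(id)) id = replacement.at(id);
      return id;
    }

    int main() {
      std::map<int, Node> graph;
      graph[0] = {{}, false};      // A plain value.
      graph[1] = {{0, 0}, true};   // phi(v0, v0): redundant.
      graph[2] = {{1, 0}, true};   // phi(phi1, v0): redundant after phi1.
      std::map<int, int> replacement;
      std::vector<int> worklist = {1, 2};
      while (!worklist.empty()) {
        int id = worklist.back();
        worklist.pop_back();
        if (replacement.count(id)) continue;
        std::set<int> seen;
        for (int input : graph[id].inputs)
          if (Resolve(replacement, input) != id)
            seen.insert(Resolve(replacement, input));
        if (seen.size() != 1) continue;     // Inputs disagree: keep the phi.
        replacement[id] = *seen.begin();
        for (auto& entry : graph)           // Re-queue phis that used it.
          if (entry.second.is_phi && !replacement.count(entry.first))
            for (int input : entry.second.inputs)
              if (input == id) { worklist.push_back(entry.first); break; }
      }
      for (auto& r : replacement)
        std::printf("phi %d -> value %d\n", r.first,
                    Resolve(replacement, r.second));
      return 0;
    }
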
@@ -881,18 +885,18 @@
HPhase phase("H_Unreachable phi elimination", this);
// Initialize worklist.
- ZoneList<HPhi*> phi_list(blocks_.length());
- ZoneList<HPhi*> worklist(blocks_.length());
+ ZoneList<HPhi*> phi_list(blocks_.length(), zone());
+ ZoneList<HPhi*> worklist(blocks_.length(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list.Add(phi);
+ phi_list.Add(phi, zone());
// We can't eliminate phis in the receiver position in the environment
// because if an error is thrown we need this value to construct the
// stack trace.
if (phi->HasRealUses() || phi->IsReceiver()) {
phi->set_is_live(true);
- worklist.Add(phi);
+ worklist.Add(phi, zone());
}
}
}
@@ -904,7 +908,7 @@
HValue* operand = phi->OperandAt(i);
if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
HPhi::cast(operand)->set_is_live(true);
- worklist.Add(HPhi::cast(operand));
+ worklist.Add(HPhi::cast(operand), zone());
}
}
}
@@ -951,11 +955,11 @@
void HGraph::CollectPhis() {
int block_count = blocks_.length();
- phi_list_ = new ZoneList<HPhi*>(block_count);
+ phi_list_ = new(zone()) ZoneList<HPhi*>(block_count, zone());
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi);
+ phi_list_->Add(phi, zone());
}
}
}
@@ -976,7 +980,7 @@
HValue* use = it.value();
if (!in_worklist.Contains(use->id())) {
in_worklist.Add(use->id());
- worklist->Add(use);
+ worklist->Add(use, zone());
}
}
}
@@ -987,7 +991,7 @@
class HRangeAnalysis BASE_EMBEDDED {
public:
explicit HRangeAnalysis(HGraph* graph) :
- graph_(graph), zone_(graph->isolate()->zone()), changed_ranges_(16) { }
+ graph_(graph), zone_(graph->zone()), changed_ranges_(16, zone_) { }
void Analyze();
@@ -1132,7 +1136,7 @@
void HRangeAnalysis::AddRange(HValue* value, Range* range) {
Range* original_range = value->range();
value->AddNewRange(range, zone_);
- changed_ranges_.Add(value);
+ changed_ranges_.Add(value, zone_);
Range* new_range = value->range();
TraceRange("Updated range of %d set to [%d,%d]\n",
value->id(),
@@ -1260,18 +1264,18 @@
}
-void HValueMap::Resize(int new_size) {
+void HValueMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
// Make sure we have at least one free element.
if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1);
+ ResizeLists(lists_size_ << 1, zone);
}
HValueMapListElement* new_array =
- ZONE->NewArray<HValueMapListElement>(new_size);
+ zone->NewArray<HValueMapListElement>(new_size);
memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_array = array_;
@@ -1289,14 +1293,14 @@
if (old_array[i].value != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].value);
+ Insert(lists_[current].value, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
// Rehash the directly stored value.
- Insert(old_array[i].value);
+ Insert(old_array[i].value, zone);
}
}
}
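
Why Resize() may reuse the overflow lists: rehashing into a strictly larger power-of-two table never produces more collisions than the old table had. A hypothetical, simplified model of the array-plus-chains layout (SmallMap uses std::vector where the real HValueMap threads a free list through zone arrays):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Element { int value; int next; };  // next == -1 terminates.

    class SmallMap {
     public:
      explicit SmallMap(int size) : array_(size, {-1, -1}) {}
      void Insert(int value) {
        std::size_t pos = static_cast<uint32_t>(value) & (array_.size() - 1);
        if (array_[pos].value == -1) {
          array_[pos] = {value, -1};
        } else {                                  // Collision: chain it.
          lists_.push_back({value, array_[pos].next});
          array_[pos].next = static_cast<int>(lists_.size()) - 1;
        }
      }
      bool Contains(int value) const {
        std::size_t pos = static_cast<uint32_t>(value) & (array_.size() - 1);
        if (array_[pos].value == value) return true;
        for (int cur = array_[pos].next; cur != -1; cur = lists_[cur].next)
          if (lists_[cur].value == value) return true;
        return false;
      }
      void Resize(int new_size) {                 // new_size: power of two.
        std::vector<Element> old_array(new_size, {-1, -1});
        old_array.swap(array_);                   // array_ is now empty.
        std::vector<Element> old_lists;
        old_lists.swap(lists_);
        for (const Element& head : old_array) {
          if (head.value == -1) continue;
          Insert(head.value);                     // Rehash the head slot.
          for (int cur = head.next; cur != -1; cur = old_lists[cur].next)
            Insert(old_lists[cur].value);         // Rehash its chain.
        }
      }
     private:
      std::vector<Element> array_;
      std::vector<Element> lists_;
    };

    int main() {
      SmallMap map(4);
      for (int v : {1, 5, 9, 2}) map.Insert(v);   // 1, 5, 9 collide mod 4.
      map.Resize(8);                              // The chain spreads out.
      for (int v : {1, 5, 9, 2}) assert(map.Contains(v));
      return 0;
    }
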
@@ -1305,11 +1309,11 @@
}
-void HValueMap::ResizeLists(int new_size) {
+void HValueMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
HValueMapListElement* new_lists =
- ZONE->NewArray<HValueMapListElement>(new_size);
+ zone->NewArray<HValueMapListElement>(new_size);
memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_lists = lists_;
@@ -1328,10 +1332,10 @@
}
-void HValueMap::Insert(HValue* value) {
+void HValueMap::Insert(HValue* value, Zone* zone) {
ASSERT(value != NULL);
// Resizing when half of the hashtable is filled up.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1);
+ if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
@@ -1340,7 +1344,7 @@
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1);
+ ResizeLists(lists_size_ << 1, zone);
}
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
@@ -1364,7 +1368,9 @@
HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
- memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ if (this != &other) {
+ memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ }
return *this;
}
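
The guard added to operator= above is the standard self-assignment check: memcpy with identical source and destination regions is undefined behavior, so a memcpy-based copy assignment must bail out when an object is assigned to itself. A minimal sketch with a stand-in struct (the real map copies kNumberOfTrackedSideEffects pointer slots):

    #include <cstring>

    struct SideEffectMap {
      static const int kSlots = 8;
      void* data_[kSlots];
      SideEffectMap& operator=(const SideEffectMap& other) {
        if (this != &other) {  // memcpy requires non-overlapping regions.
          std::memcpy(data_, other.data_, sizeof(data_));
        }
        return *this;
      }
    };
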
@@ -1478,12 +1484,14 @@
: graph_(graph),
info_(info),
removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length()),
- loop_side_effects_(graph->blocks()->length()),
+ block_side_effects_(graph->blocks()->length(), graph->zone()),
+ loop_side_effects_(graph->blocks()->length(), graph->zone()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
ASSERT(info->isolate()->heap()->allow_allocation(false));
- block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
+ block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
+ graph_->zone());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
+ graph_->zone());
}
~HGlobalValueNumberer() {
ASSERT(!info_->isolate()->heap()->allow_allocation(true));
@@ -1509,7 +1517,7 @@
HGraph* graph() { return graph_; }
CompilationInfo* info() { return info_; }
- Zone* zone() { return graph_->zone(); }
+ Zone* zone() const { return graph_->zone(); }
HGraph* graph_;
CompilationInfo* info_;
@@ -1949,7 +1957,7 @@
// GvnBasicBlockState instances.
void HGlobalValueNumberer::AnalyzeGraph() {
HBasicBlock* entry_block = graph_->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap();
+ HValueMap* entry_map = new(zone()) HValueMap(zone());
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
@@ -1993,7 +2001,7 @@
if (instr->HasSideEffects()) removed_side_effects_ = true;
instr->DeleteAndReplaceWith(other);
} else {
- map->Add(instr);
+ map->Add(instr, zone());
}
}
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
@@ -2049,7 +2057,7 @@
public:
explicit HInferRepresentation(HGraph* graph)
: graph_(graph),
- worklist_(8),
+ worklist_(8, graph->zone()),
in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
void Analyze();
@@ -2061,7 +2069,7 @@
void AddDependantsToWorklist(HValue* current);
void InferBasedOnUses(HValue* current);
- Zone* zone() { return graph_->zone(); }
+ Zone* zone() const { return graph_->zone(); }
HGraph* graph_;
ZoneList<HValue*> worklist_;
@@ -2073,7 +2081,7 @@
if (current->representation().IsSpecialization()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current);
+ worklist_.Add(current, zone());
in_worklist_.Add(current->id());
}
@@ -2140,8 +2148,16 @@
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- Representation rep = use->RequiredInputRepresentation(it.index());
+ Representation rep = use->ObservedInputRepresentation(it.index());
if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("%d %s is used by %d %s as %s\n",
+ value->id(),
+ value->Mnemonic(),
+ use->id(),
+ use->Mnemonic(),
+ rep.Mnemonic());
+ }
if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
use_count[rep.kind()] += use->LoopWeight();
}
@@ -2176,12 +2192,12 @@
// bit-vector of length <number of phis>.
const ZoneList<HPhi*>* phi_list = graph_->phi_list();
int phi_count = phi_list->length();
- ZoneList<BitVector*> connected_phis(phi_count);
+ ZoneList<BitVector*> connected_phis(phi_count, graph_->zone());
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->InitRealUses(i);
BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
connected_set->Add(i);
- connected_phis.Add(connected_set);
+ connected_phis.Add(connected_set, zone());
}
// (2) Do a fixed point iteration to find the set of connected phis. A
@@ -2205,21 +2221,34 @@
}
}
- // (3) Use the phi reachability information from step 2 to
- // (a) sum up the non-phi use counts of all connected phis.
- // (b) push information about values which can't be converted to integer
- // without deoptimization through the phi use-def chains, avoiding
- // unnecessary deoptimizations later.
+ // (3a) Use the phi reachability information from step 2 to
+ // push information about values which can't be converted to integer
+ // without deoptimization through the phi use-def chains, avoiding
+ // unnecessary deoptimizations later.
for (int i = 0; i < phi_count; ++i) {
HPhi* phi = phi_list->at(i);
bool cti = phi->AllOperandsConvertibleToInteger();
+ if (cti) continue;
+
+ for (BitVector::Iterator it(connected_phis.at(i));
+ !it.Done();
+ it.Advance()) {
+ HPhi* phi = phi_list->at(it.Current());
+ phi->set_is_convertible_to_integer(false);
+ phi->ResetInteger32Uses();
+ }
+ }
+
+ // (3b) Use the phi reachability information from step 2 to
+ // sum up the non-phi use counts of all connected phis.
+ for (int i = 0; i < phi_count; ++i) {
+ HPhi* phi = phi_list->at(i);
for (BitVector::Iterator it(connected_phis.at(i));
!it.Done();
it.Advance()) {
int index = it.Current();
- HPhi* it_use = phi_list->at(it.Current());
- if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice!
- if (!cti) it_use->set_is_convertible_to_integer(false);
+ HPhi* it_use = phi_list->at(index);
+ if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
}
}
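
The effect of splitting step (3) in two, sketched with toy data: the first pass poisons every connected component containing a non-int32 phi, and only then does the separate pass sum use counts. Bitsets stand in for the real BitVector over the phi list; the data is illustrative:

    #include <bitset>
    #include <cstdio>
    #include <vector>

    int main() {
      const int kPhiCount = 4;
      std::vector<std::bitset<4> > connected(kPhiCount);
      for (int i = 0; i < kPhiCount; ++i) connected[i].set(i);
      connected[0].set(1);                     // Phis 0 and 1 are connected.
      connected[1].set(0);
      std::vector<bool> all_int32 = {false, true, true, true};
      std::vector<bool> convertible(kPhiCount, true);
      for (int i = 0; i < kPhiCount; ++i) {
        if (all_int32[i]) continue;            // cti: nothing to propagate.
        for (int j = 0; j < kPhiCount; ++j)    // Poison the whole set.
          if (connected[i].test(j)) convertible[j] = false;
      }
      for (int i = 0; i < kPhiCount; ++i)
        std::printf("phi %d convertible to int32: %d\n", i,
                    static_cast<int>(convertible[i]));
      return 0;
    }
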
@@ -2277,9 +2306,9 @@
i = last_back_edge->block_id();
// Update phis of the loop header now after the whole loop body is
// guaranteed to be processed.
- ZoneList<HValue*> worklist(block->phis()->length());
+ ZoneList<HValue*> worklist(block->phis()->length(), zone());
for (int j = 0; j < block->phis()->length(); ++j) {
- worklist.Add(block->phis()->at(j));
+ worklist.Add(block->phis()->at(j), zone());
}
InferTypes(&worklist);
}
@@ -2346,8 +2375,8 @@
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
new_value = is_truncating
- ? constant->CopyToTruncatedInt32()
- : constant->CopyToRepresentation(to);
+ ? constant->CopyToTruncatedInt32(zone())
+ : constant->CopyToRepresentation(to, zone());
}
if (new_value == NULL) {
@@ -2766,7 +2795,7 @@
HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info());
+ graph_ = new(zone()) HGraph(info(), zone());
if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
{
@@ -3117,20 +3146,22 @@
class BoundsCheckTable : private ZoneHashMap {
public:
- BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key) {
+ BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone) {
return reinterpret_cast<BoundsCheckBbData**>(
- &(Lookup(key, key->Hash(), true)->value));
+ &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
}
- void Insert(BoundsCheckKey* key, BoundsCheckBbData* data) {
- Lookup(key, key->Hash(), true)->value = data;
+ void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone) {
+ Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
}
void Delete(BoundsCheckKey* key) {
Remove(key, key->Hash());
}
- BoundsCheckTable() : ZoneHashMap(BoundsCheckKeyMatch) { }
+ explicit BoundsCheckTable(Zone* zone)
+ : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)) { }
};
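
The ZoneAllocationPolicy plumbing above decouples the hash map from any global allocator: the inserting Lookup takes the policy per call, bound to one arena. A hypothetical, simplified model of that design; Arena and PolicyMap are illustrations, not the real ZoneHashMap:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    struct Arena {
      std::vector<void*> blocks;
      void* New(std::size_t n) {
        void* p = std::malloc(n);
        blocks.push_back(p);
        return p;
      }
      ~Arena() { for (void* p : blocks) std::free(p); }
    };

    struct ArenaPolicy {           // Routes every allocation to one arena.
      Arena* arena;
      void* New(std::size_t n) const { return arena->New(n); }
    };

    struct Entry { int key; int value; Entry* next; };

    class PolicyMap {
     public:
      // With insert == true the entry is created through the policy,
      // mirroring Lookup(key, hash, true, ZoneAllocationPolicy(zone)).
      Entry* Lookup(int key, bool insert, ArenaPolicy policy) {
        for (Entry* e = head_; e != nullptr; e = e->next)
          if (e->key == key) return e;
        if (!insert) return nullptr;
        Entry* e = static_cast<Entry*>(policy.New(sizeof(Entry)));
        *e = {key, 0, head_};
        head_ = e;
        return e;
      }
     private:
      Entry* head_ = nullptr;
    };

    int main() {
      Arena arena;
      PolicyMap map;
      map.Lookup(7, true, ArenaPolicy{&arena})->value = 42;
      std::printf("%d\n", map.Lookup(7, false, ArenaPolicy{&arena})->value);
      return 0;
    }
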
@@ -3153,8 +3184,8 @@
int32_t offset;
BoundsCheckKey* key =
- BoundsCheckKey::Create(bb->zone(), check, &offset);
- BoundsCheckBbData** data_p = table->LookupOrInsert(key);
+ BoundsCheckKey::Create(zone(), check, &offset);
+ BoundsCheckBbData** data_p = table->LookupOrInsert(key, zone());
BoundsCheckBbData* data = *data_p;
if (data == NULL) {
bb_data_list = new(zone()) BoundsCheckBbData(key,
@@ -3176,14 +3207,14 @@
int32_t new_upper_offset = offset > data->UpperOffset()
? offset
: data->UpperOffset();
- bb_data_list = new(bb->zone()) BoundsCheckBbData(key,
- new_lower_offset,
- new_upper_offset,
- bb,
- check,
- bb_data_list,
- data);
- table->Insert(key, bb_data_list);
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ new_lower_offset,
+ new_upper_offset,
+ bb,
+ check,
+ bb_data_list,
+ data);
+ table->Insert(key, bb_data_list, zone());
}
}
@@ -3196,7 +3227,7 @@
data = data->NextInBasicBlock()) {
data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
- table->Insert(data->Key(), data->FatherInDominatorTree());
+ table->Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
table->Delete(data->Key());
}
@@ -3207,7 +3238,7 @@
void HGraph::EliminateRedundantBoundsChecks() {
HPhase phase("H_Eliminate bounds checks", this);
AssertNoAllocation no_gc;
- BoundsCheckTable checks_table;
+ BoundsCheckTable checks_table(zone());
EliminateRedundantBoundsChecks(entry_block(), &checks_table);
}
@@ -3327,9 +3358,9 @@
template <class Instruction>
HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
- ZoneList<HValue*> arguments(count);
+ ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
- arguments.Add(Pop());
+ arguments.Add(Pop(), zone());
}
while (!arguments.is_empty()) {
@@ -3829,13 +3860,13 @@
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
ZoneList<HUnknownOSRValue*>* osr_values =
- new(zone()) ZoneList<HUnknownOSRValue*>(length);
+ new(zone()) ZoneList<HUnknownOSRValue*>(length, zone());
for (int i = 0; i < first_expression_index; ++i) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Bind(i, osr_value);
- osr_values->Add(osr_value);
+ osr_values->Add(osr_value, zone());
}
if (first_expression_index != length) {
@@ -3844,7 +3875,7 @@
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Push(osr_value);
- osr_values->Add(osr_value);
+ osr_values->Add(osr_value, zone());
}
}
@@ -4097,8 +4128,7 @@
HValue* key = AddInstruction(
new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
- environment()->ExpressionStackAt(0), // Iteration index.
- OMIT_HOLE_CHECK));
+ environment()->ExpressionStackAt(0))); // Iteration index.
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@@ -4472,7 +4502,7 @@
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
@@ -4491,7 +4521,8 @@
property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
- HInstruction* store = BuildStoreNamed(literal, value, property);
+ HInstruction* store;
+ CHECK_ALIVE(store = BuildStoreNamed(literal, value, property));
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(key->id());
} else {
@@ -4658,9 +4689,37 @@
Handle<Map> type,
LookupResult* lookup,
bool smi_and_map_check) {
+ ASSERT(lookup->IsFound());
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, type));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, type, zone()));
+ }
+
+ // If the property does not exist yet, we have to check that it wasn't made
+ // read-only or turned into a setter by modifications made in the
+ // meantime on the prototype chain.
+ if (!lookup->IsProperty()) {
+ Object* proto = type->prototype();
+ // First check that the prototype chain isn't affected already.
+ LookupResult proto_result(isolate());
+ proto->Lookup(*name, &proto_result);
+ if (proto_result.IsProperty()) {
+ // If the inherited property could make the store read-only, bail out.
+ if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
+ Bailout("improper object on prototype chain for store");
+ return NULL;
+ }
+ // We only need to check up to the preexisting property.
+ proto = proto_result.holder();
+ } else {
+ // Otherwise, find the top prototype.
+ while (proto->GetPrototype()->IsJSObject()) proto = proto->GetPrototype();
+ ASSERT(proto->GetPrototype()->IsNull());
+ }
+ ASSERT(proto->IsJSObject());
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ Handle<JSObject>(JSObject::cast(type->prototype())),
+ Handle<JSObject>(JSObject::cast(proto))));
}
int index = ComputeLoadStoreFieldIndex(type, name, lookup);
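
The prototype walk added above, reduced to its control flow: stop at the first prototype that already has the property (bailing out if it would make the store read-only), otherwise walk to the top of the chain and check maps up to there. Standalone sketch with hypothetical types:

    #include <cstdio>

    struct Proto {
      bool has_property;
      bool read_only;
      Proto* parent;  // nullptr terminates the chain.
    };

    // Returns the prototype up to which map checks are needed, or nullptr
    // if the store cannot be specialized (read-only property inherited).
    Proto* FindCheckDepth(Proto* proto) {
      for (Proto* p = proto; p != nullptr; p = p->parent) {
        if (p->has_property) {
          return p->read_only ? nullptr : p;  // Bail out on read-only.
        }
        if (p->parent == nullptr) return p;   // Top of the chain.
      }
      return nullptr;
    }

    int main() {
      Proto top = {false, false, nullptr};
      Proto mid = {true, false, &top};
      Proto bottom = {false, false, &mid};
      Proto* limit = FindCheckDepth(&bottom);
      std::printf("check up to the holder of the property: %d\n",
                  limit == &mid);
      return 0;
    }
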
@@ -4778,14 +4837,15 @@
// for all maps. Requires special map check on the set of all handled maps.
HInstruction* instr;
if (count == types->length() && is_monomorphic_field) {
- AddInstruction(new(zone()) HCheckMaps(object, types));
+ AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
instr = BuildLoadNamedField(object, expr, map, &lookup, false);
} else {
HValue* context = environment()->LookupContext();
instr = new(zone()) HLoadNamedFieldPolymorphic(context,
object,
types,
- name);
+ name,
+ zone());
}
instr->set_position(expr->position());
@@ -4819,8 +4879,9 @@
current_block()->Finish(compare);
set_current_block(if_true);
- HInstruction* instr =
- BuildStoreNamedField(object, name, value, map, &lookup, false);
+ HInstruction* instr;
+ CHECK_ALIVE(instr =
+ BuildStoreNamedField(object, name, value, map, &lookup, false));
instr->set_position(expr->position());
// Goto will add the HSimulate for the store.
AddInstruction(instr);
@@ -4871,7 +4932,7 @@
void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* value = NULL;
@@ -4888,10 +4949,8 @@
ASSERT(!name.is_null());
SmallMapList* types = expr->GetReceiverTypes();
- LookupResult lookup(isolate());
-
if (expr->IsMonomorphic()) {
- instr = BuildStoreNamed(object, value, expr);
+ CHECK_ALIVE(instr = BuildStoreNamed(object, value, expr));
} else if (types != NULL && types->length() > 1) {
HandlePolymorphicStoreNamedField(expr, object, value, types, name);
@@ -5044,7 +5103,7 @@
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle());
+ prop->RecordTypeFeedback(oracle(), zone());
if (prop->key()->IsPropertyName()) {
// Named property.
@@ -5070,7 +5129,8 @@
PushAndAdd(instr);
if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
- HInstruction* store = BuildStoreNamed(obj, instr, prop);
+ HInstruction* store;
+ CHECK_ALIVE(store = BuildStoreNamed(obj, instr, prop));
AddInstruction(store);
// Drop the simulated receiver and value. Return the value.
Drop(2);
@@ -5102,7 +5162,7 @@
PushAndAdd(instr);
if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
RelocInfo::kNoPosition,
true, // is_store
@@ -5150,7 +5210,7 @@
// We insert a use of the old value to detect unsupported uses of const
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
- AddInstruction(new HUseConst(old_value));
+ AddInstruction(new(zone()) HUseConst(old_value));
}
} else if (var->mode() == CONST_HARMONY) {
if (expr->op() != Token::INIT_CONST_HARMONY) {
@@ -5277,7 +5337,7 @@
bool smi_and_map_check) {
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, type));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, type, zone()));
}
int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -5321,7 +5381,7 @@
true);
} else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
AddInstruction(new(zone()) HCheckNonSmi(obj));
- AddInstruction(HCheckMaps::NewWithTransitions(obj, map));
+ AddInstruction(HCheckMaps::NewWithTransitions(obj, map, zone()));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
return new(zone()) HConstant(function, Representation::Tagged());
} else {
@@ -5422,7 +5482,8 @@
if (IsFastDoubleElementsKind(elements_kind)) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
} else { // Smi or Object elements.
- return new(zone()) HLoadKeyedFastElement(elements, checked_key, mode);
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key,
+ elements_kind);
}
}
@@ -5434,7 +5495,7 @@
Handle<Map> map,
bool is_store) {
HInstruction* mapcheck =
- AddInstruction(new(zone()) HCheckMaps(object, map, dependency));
+ AddInstruction(new(zone()) HCheckMaps(object, map, zone(), dependency));
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
@@ -5451,7 +5512,7 @@
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) {
HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map());
+ elements, isolate()->factory()->fixed_array_map(), zone());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
}
@@ -5470,7 +5531,8 @@
fast_elements ||
map->has_fast_double_elements());
if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
+ length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
+ HType::Smi()));
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
@@ -5534,7 +5596,7 @@
AddInstruction(transition);
} else {
type_todo[map->elements_kind()] = true;
- if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+ if (IsExternalArrayElementsKind(map->elements_kind())) {
todo_external_array = true;
}
num_untransitionable_maps++;
@@ -5558,7 +5620,7 @@
return is_store ? NULL : instr;
}
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
HInstruction* elements_kind_instr =
@@ -5605,7 +5667,7 @@
if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
AddInstruction(new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map(),
- elements_kind_branch));
+ zone(), elements_kind_branch));
}
// TODO(jkummerow): The need for these two blocks could be avoided
// in one of two ways:
@@ -5625,7 +5687,8 @@
set_current_block(if_jsarray);
HInstruction* length;
- length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
+ length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
+ HType::Smi()));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind, is_store));
@@ -5812,7 +5875,7 @@
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
if (TryArgumentsAccess(expr)) return;
@@ -5823,13 +5886,12 @@
HValue* array = Pop();
AddInstruction(new(zone()) HCheckNonSmi(array));
HInstruction* mapcheck =
- AddInstruction(HCheckInstanceType::NewIsJSArray(array));
+ AddInstruction(HCheckInstanceType::NewIsJSArray(array, zone()));
instr = new(zone()) HJSArrayLength(array, mapcheck);
-
} else if (expr->IsStringLength()) {
HValue* string = Pop();
AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string));
+ AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
instr = new(zone()) HStringLength(string);
} else if (expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
@@ -5897,7 +5959,8 @@
// its prototypes.
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(receiver));
- AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map));
+ AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map,
+ zone()));
}
if (!expr->holder().is_null()) {
AddInstruction(new(zone()) HCheckPrototypeMaps(
@@ -6232,7 +6295,7 @@
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(target_info.scope());
+ ScopeInfo::Create(target_info.scope(), zone());
target_shared->set_scope_info(*target_scope_info);
}
target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -6251,7 +6314,8 @@
TypeFeedbackOracle target_oracle(
Handle<Code>(target_shared->code()),
Handle<Context>(target->context()->global_context()),
- isolate());
+ isolate(),
+ zone());
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
@@ -6271,8 +6335,9 @@
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
- HConstant* context = new HConstant(Handle<Context>(target->context()),
- Representation::Tagged());
+ HConstant* context =
+ new(zone()) HConstant(Handle<Context>(target->context()),
+ Representation::Tagged());
AddInstruction(context);
inner_env->BindContext(context);
#endif
@@ -6287,9 +6352,9 @@
if (function->scope()->arguments() != NULL) {
HEnvironment* arguments_env = inner_env->arguments_environment();
int arguments_count = arguments_env->parameter_count();
- arguments_values = new(zone()) ZoneList<HValue*>(arguments_count);
+ arguments_values = new(zone()) ZoneList<HValue*>(arguments_count, zone());
for (int i = 0; i < arguments_count; i++) {
- arguments_values->Add(arguments_env->Lookup(i));
+ arguments_values->Add(arguments_env->Lookup(i), zone());
}
}
@@ -6864,6 +6929,11 @@
return;
}
if (TryInlineCall(expr)) return;
+
+ if (expr->target().is_identical_to(info()->closure())) {
+ graph()->MarkRecursive();
+ }
+
call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
argument_count));
} else {
@@ -6884,8 +6954,8 @@
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global = new(zone()) HGlobalObject(context);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
AddInstruction(global);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
@@ -6915,8 +6985,8 @@
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
AddInstruction(global_object);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
AddInstruction(receiver);
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
@@ -7322,7 +7392,7 @@
} else {
// Argument of the count operation is a property.
ASSERT(prop != NULL);
- prop->RecordTypeFeedback(oracle());
+ prop->RecordTypeFeedback(oracle(), zone());
if (prop->key()->IsPropertyName()) {
// Named property.
@@ -7345,7 +7415,8 @@
after = BuildIncrement(returns_original_input, expr);
input = Pop();
- HInstruction* store = BuildStoreNamed(obj, after, prop);
+ HInstruction* store;
+ CHECK_ALIVE(store = BuildStoreNamed(obj, after, prop));
AddInstruction(store);
// Overwrite the receiver in the bailout environment with the result
@@ -7375,7 +7446,7 @@
after = BuildIncrement(returns_original_input, expr);
input = Pop();
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
RelocInfo::kNoPosition,
true, // is_store
@@ -7401,7 +7472,7 @@
HValue* string,
HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string));
+ AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
HStringLength* length = new(zone()) HStringLength(string);
AddInstruction(length);
HInstruction* checked_index =
@@ -7425,9 +7496,9 @@
case Token::ADD:
if (info.IsString()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsString(left));
+ AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsString(right));
+ AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
instr = new(zone()) HStringAdd(context, left, right);
} else {
instr = HAdd::NewHAdd(zone(), context, left, right);
@@ -7473,8 +7544,10 @@
}
Representation rep = ToRepresentation(info);
// We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
- rep = Representation::Integer32();
+ if (instr->IsBitwiseBinaryOperation()) {
+ HBitwiseBinaryOperation::cast(instr)->
+ InitializeObservedInputRepresentation(rep);
+ if (rep.IsDouble()) rep = Representation::Integer32();
}
TraceRepresentation(expr->op(), info, instr, rep);
instr->AssumeRepresentation(rep);
@@ -7827,18 +7900,18 @@
Handle<Map> map = oracle()->GetCompareMap(expr);
if (!map.is_null()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckMaps::NewWithTransitions(left, map));
+ AddInstruction(HCheckMaps::NewWithTransitions(left, map, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckMaps::NewWithTransitions(right, map));
+ AddInstruction(HCheckMaps::NewWithTransitions(right, map, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
@@ -7851,9 +7924,9 @@
} else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
(op == Token::EQ || op == Token::EQ_STRICT)) {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
@@ -7925,10 +7998,10 @@
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_.Add(variable->name());
+ globals_.Add(variable->name(), zone());
globals_.Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
+ : isolate()->factory()->undefined_value(), zone());
return;
case Variable::PARAMETER:
case Variable::LOCAL:
@@ -7941,7 +8014,7 @@
if (hole_init) {
HValue* value = graph()->GetConstantHole();
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new HStoreContextSlot(
+ HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
@@ -7958,12 +8031,12 @@
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_.Add(variable->name());
+ globals_.Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_.Add(function);
+ globals_.Add(function, zone());
return;
}
case Variable::PARAMETER:
@@ -7977,7 +8050,7 @@
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new HStoreContextSlot(
+ HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
@@ -8223,11 +8296,11 @@
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
Handle<String> name = isolate()->factory()->undefined_symbol();
- AddInstruction(new HStoreNamedField(object,
- name,
- value,
- true, // in-object store.
- JSValue::kValueOffset));
+ AddInstruction(new(zone()) HStoreNamedField(object,
+ name,
+ value,
+ true, // in-object store.
+ JSValue::kValueOffset));
if_js_value->Goto(join);
join->SetJoinId(call->id());
set_current_block(join);
@@ -8515,10 +8588,11 @@
HEnvironment::HEnvironment(HEnvironment* outer,
Scope* scope,
- Handle<JSFunction> closure)
+ Handle<JSFunction> closure,
+ Zone* zone)
: closure_(closure),
- values_(0),
- assigned_variables_(4),
+ values_(0, zone),
+ assigned_variables_(4, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
@@ -8526,14 +8600,15 @@
outer_(outer),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber) {
+ ast_id_(AstNode::kNoNumber),
+ zone_(zone) {
Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
}
-HEnvironment::HEnvironment(const HEnvironment* other)
- : values_(0),
- assigned_variables_(0),
+HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
+ : values_(0, zone),
+ assigned_variables_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
@@ -8541,7 +8616,8 @@
outer_(NULL),
pop_count_(0),
push_count_(0),
- ast_id_(other->ast_id()) {
+ ast_id_(other->ast_id()),
+ zone_(zone) {
Initialize(other);
}
@@ -8549,17 +8625,19 @@
HEnvironment::HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
- int arguments)
+ int arguments,
+ Zone* zone)
: closure_(closure),
- values_(arguments),
- assigned_variables_(0),
+ values_(arguments, zone),
+ assigned_variables_(0, zone),
frame_type_(frame_type),
parameter_count_(arguments),
local_count_(0),
outer_(outer),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber) {
+ ast_id_(AstNode::kNoNumber),
+ zone_(zone) {
}
@@ -8571,15 +8649,15 @@
// Avoid reallocating the temporaries' backing store on the first Push.
int total = parameter_count + specials_count_ + local_count + stack_height;
- values_.Initialize(total + 4);
- for (int i = 0; i < total; ++i) values_.Add(NULL);
+ values_.Initialize(total + 4, zone());
+ for (int i = 0; i < total; ++i) values_.Add(NULL, zone());
}
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
- values_.AddAll(other->values_);
- assigned_variables_.AddAll(other->assigned_variables_);
+ values_.AddAll(other->values_, zone());
+ assigned_variables_.AddAll(other->assigned_variables_, zone());
frame_type_ = other->frame_type_;
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
@@ -8607,7 +8685,7 @@
} else if (values_[i] != other->values_[i]) {
// There is a fresh value on the incoming edge, a phi is needed.
ASSERT(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = new(block->zone()) HPhi(i);
+ HPhi* phi = new(zone()) HPhi(i, zone());
HValue* old_value = values_[i];
for (int j = 0; j < block->predecessors()->length(); j++) {
phi->AddInput(old_value);
@@ -8623,7 +8701,7 @@
void HEnvironment::Bind(int index, HValue* value) {
ASSERT(value != NULL);
if (!assigned_variables_.Contains(index)) {
- assigned_variables_.Add(index);
+ assigned_variables_.Add(index, zone());
}
values_[index] = value;
}
@@ -8663,7 +8741,7 @@
HEnvironment* HEnvironment::Copy() const {
- return new(closure()->GetIsolate()->zone()) HEnvironment(this);
+ return new(zone()) HEnvironment(this, zone());
}
@@ -8677,7 +8755,7 @@
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* new_env = Copy();
for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = new(loop_header->zone()) HPhi(i);
+ HPhi* phi = new(zone()) HPhi(i, zone());
phi->AddInput(values_[i]);
new_env->values_[i] = phi;
loop_header->AddPhi(phi);
@@ -8691,8 +8769,9 @@
Handle<JSFunction> target,
FrameType frame_type,
int arguments) const {
- HEnvironment* new_env = new(closure()->GetIsolate()->zone())
- HEnvironment(outer, target, frame_type, arguments + 1);
+ HEnvironment* new_env =
+ new(zone()) HEnvironment(outer, target, frame_type,
+ arguments + 1, zone());
for (int i = 0; i <= arguments; ++i) { // Include receiver.
new_env->Push(ExpressionStackAt(arguments - i));
}
@@ -8732,7 +8811,7 @@
}
HEnvironment* inner =
- new(zone) HEnvironment(outer, function->scope(), target);
+ new(zone) HEnvironment(outer, function->scope(), target, zone);
// Get the argument values from the original environment.
for (int i = 0; i <= arity; ++i) { // Include receiver.
HValue* push = (i <= arguments) ?
@@ -8922,27 +9001,28 @@
const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
for (int i = 0; i < fixed_d->length(); ++i) {
- TraceLiveRange(fixed_d->at(i), "fixed");
+ TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone());
}
const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
for (int i = 0; i < fixed->length(); ++i) {
- TraceLiveRange(fixed->at(i), "fixed");
+ TraceLiveRange(fixed->at(i), "fixed", allocator->zone());
}
const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
for (int i = 0; i < live_ranges->length(); ++i) {
- TraceLiveRange(live_ranges->at(i), "object");
+ TraceLiveRange(live_ranges->at(i), "object", allocator->zone());
}
}
-void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
+void HTracer::TraceLiveRange(LiveRange* range, const char* type,
+ Zone* zone) {
if (range != NULL && !range->IsEmpty()) {
PrintIndent();
trace_.Add("%d %s", range->id(), type);
if (range->HasRegisterAssigned()) {
- LOperand* op = range->CreateAssignedOperand(ZONE);
+ LOperand* op = range->CreateAssignedOperand(zone);
int assigned_reg = op->index();
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
diff --git a/src/hydrogen.h b/src/hydrogen.h
index a7f5025..6fa3d1b 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -77,7 +77,7 @@
return &deleted_phis_;
}
void RecordDeletedPhi(int merge_index) {
- deleted_phis_.Add(merge_index);
+ deleted_phis_.Add(merge_index, zone());
}
HBasicBlock* dominator() const { return dominator_; }
HEnvironment* last_environment() const { return last_environment_; }
@@ -158,7 +158,7 @@
dominates_loop_successors_ = true;
}
- inline Zone* zone();
+ inline Zone* zone() const;
#ifdef DEBUG
void Verify();
@@ -212,12 +212,12 @@
class HLoopInformation: public ZoneObject {
public:
- explicit HLoopInformation(HBasicBlock* loop_header)
- : back_edges_(4),
+ HLoopInformation(HBasicBlock* loop_header, Zone* zone)
+ : back_edges_(4, zone),
loop_header_(loop_header),
- blocks_(8),
+ blocks_(8, zone),
stack_check_(NULL) {
- blocks_.Add(loop_header);
+ blocks_.Add(loop_header, zone);
}
virtual ~HLoopInformation() {}
@@ -244,10 +244,10 @@
class BoundsCheckTable;
class HGraph: public ZoneObject {
public:
- explicit HGraph(CompilationInfo* info);
+ HGraph(CompilationInfo* info, Zone* zone);
Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
+ Zone* zone() const { return zone_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -304,7 +304,7 @@
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) {
- values_.Add(value);
+ values_.Add(value, zone());
return values_.length() - 1;
}
HValue* LookupValue(int id) const {
@@ -336,6 +336,14 @@
osr_values_.set(values);
}
+ void MarkRecursive() {
+ is_recursive_ = true;
+ }
+
+ bool is_recursive() const {
+ return is_recursive_;
+ }
+
private:
void Postorder(HBasicBlock* block,
BitVector* visited,
@@ -380,11 +388,15 @@
SetOncePointer<HBasicBlock> osr_loop_entry_;
SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+ Zone* zone_;
+
+ bool is_recursive_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-Zone* HBasicBlock::zone() { return graph_->zone(); }
+Zone* HBasicBlock::zone() const { return graph_->zone(); }
// Type of stack frame an environment might refer to.
@@ -395,7 +407,8 @@
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
- Handle<JSFunction> closure);
+ Handle<JSFunction> closure,
+ Zone* zone);
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
@@ -462,7 +475,7 @@
void Push(HValue* value) {
ASSERT(value != NULL);
++push_count_;
- values_.Add(value);
+ values_.Add(value, zone());
}
HValue* Pop() {
@@ -519,13 +532,16 @@
void PrintTo(StringStream* stream);
void PrintToStd();
+ Zone* zone() const { return zone_; }
+
private:
- explicit HEnvironment(const HEnvironment* other);
+ HEnvironment(const HEnvironment* other, Zone* zone);
HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
- int arguments);
+ int arguments,
+ Zone* zone);
// Create an artificial stub environment (e.g. for argument adaptor or
// constructor stub).
@@ -563,6 +579,7 @@
int pop_count_;
int push_count_;
int ast_id_;
+ Zone* zone_;
};
@@ -607,7 +624,7 @@
HGraphBuilder* owner() const { return owner_; }
- inline Zone* zone();
+ inline Zone* zone() const;
// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
@@ -821,7 +838,7 @@
BreakAndContinueScope* next_;
};
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+ HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle, Zone* zone);
HGraph* CreateGraph();
@@ -1144,7 +1161,7 @@
Handle<Map> receiver_map,
bool smi_and_map_check);
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@@ -1176,12 +1193,12 @@
};
-Zone* AstContext::zone() { return owner_->zone(); }
+Zone* AstContext::zone() const { return owner_->zone(); }
class HValueMap: public ZoneObject {
public:
- HValueMap()
+ explicit HValueMap(Zone* zone)
: array_size_(0),
lists_size_(0),
count_(0),
@@ -1189,15 +1206,15 @@
array_(NULL),
lists_(NULL),
free_list_head_(kNil) {
- ResizeLists(kInitialSize);
- Resize(kInitialSize);
+ ResizeLists(kInitialSize, zone);
+ Resize(kInitialSize, zone);
}
void Kill(GVNFlagSet flags);
- void Add(HValue* value) {
+ void Add(HValue* value, Zone* zone) {
present_flags_.Add(value->gvn_flags());
- Insert(value);
+ Insert(value, zone);
}
HValue* Lookup(HValue* value) const;
@@ -1221,9 +1238,9 @@
HValueMap(Zone* zone, const HValueMap* other);
- void Resize(int new_size);
- void ResizeLists(int new_size);
- void Insert(HValue* value);
+ void Resize(int new_size, Zone* zone);
+ void ResizeLists(int new_size, Zone* zone);
+ void Insert(HValue* value, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
@@ -1376,7 +1393,7 @@
WriteChars(filename, "", 0, false);
}
- void TraceLiveRange(LiveRange* range, const char* type);
+ void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
void Trace(const char* name, HGraph* graph, LChunk* chunk);
void FlushToFile();
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 73961e1..326207f 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -351,10 +351,12 @@
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
+ PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
ast_id,
input_frame_size,
- output_frame_size);
+ output_frame_size,
+ input_->GetRegister(ebp.code()),
+ input_->GetRegister(esp.code()));
}
// There's only one output frame in the OSR case.
@@ -404,7 +406,7 @@
name = "function";
break;
}
- PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset,
@@ -415,6 +417,24 @@
output_offset -= kPointerSize;
}
+ // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+ int frame_pointer = input_->GetRegister(ebp.code());
+ if ((frame_pointer & kPointerSize) != 0) {
+ frame_pointer -= kPointerSize;
+ has_alignment_padding_ = 1;
+ }
+
+ int32_t alignment_state = (has_alignment_padding_ == 1) ?
+ kAlignmentPaddingPushed :
+ kNoAlignmentPadding;
+ if (FLAG_trace_osr) {
+ PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
+ output_offset,
+ alignment_state);
+ }
+ output_[0]->SetFrameSlot(output_offset, alignment_state);
+ output_offset -= kPointerSize;
+
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
@@ -427,7 +447,7 @@
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+ output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
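
The alignment test above is plain pointer arithmetic: on ia32 with 4-byte words and 8-byte frame alignment, the kPointerSize bit of ebp says whether a padding word was pushed, and if so the rebuilt frame pointer moves down one slot. A worked example with illustrative addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPointerSize = 4;
      // Two candidate ebp values: 0x...8 is 8-byte aligned, 0x...c is not.
      for (uint32_t fp : {0xbffff008u, 0xbffff00cu}) {
        int has_padding = (fp & kPointerSize) != 0;
        uint32_t adjusted = has_padding ? fp - kPointerSize : fp;
        std::printf("ebp=0x%08x padded=%d adjusted=0x%08x\n",
                    fp, has_padding, adjusted);
      }
      return 0;
    }
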
@@ -688,24 +708,38 @@
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+
+ unsigned alignment_state_offset =
+ input_offset - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
+ ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ JavaScriptFrameConstants::kLocal0Offset);
+
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ has_alignment_padding_ =
+ (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
// 2 = context and function in the frame.
- top_address =
- input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+ // If the optimized frame had alignment padding, adjust the frame pointer
+ // to point to the new position of the old frame pointer after padding
+ // is removed. Subtract 2 * kPointerSize for the context and function slots.
+ top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
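
The alignment-state slot is the first local, located by walking down from the top of the input frame past the parameters and the fixed frame. A worked example of that arithmetic; the sizes are illustrative only (2 declared parameters, and a 4-word fixed frame is assumed here just to make the numbers concrete):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int kFixedFrameSize = 4 * kPointerSize;  // Assumed for ia32.
      int input_frame_size = 40;                // Total input frame, bytes.
      int parameter_count = 2 + 1;              // Params plus receiver.
      int alignment_state_offset =
          input_frame_size - parameter_count * kPointerSize -
          kFixedFrameSize - kPointerSize;       // First local's slot.
      std::printf("alignment state lives at frame offset %d\n",
                  alignment_state_offset);      // 40 - 12 - 16 - 4 = 8.
      return 0;
    }
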
@@ -747,13 +781,17 @@
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+ ASSERT(!is_bottommost ||
+ (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
+ fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
+ ASSERT(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
@@ -948,6 +986,28 @@
}
__ pop(eax);
+ if (type() != OSR) {
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, "alignment marker expected");
+ }
+ __ bind(&no_padding);
+ } else {
+ // If frame needs dynamic alignment push padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ push(Immediate(kAlignmentZapValue));
+ __ bind(&no_padding);
+ }
+
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 9e51857..18915e2 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -53,6 +53,10 @@
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
+const int kNoAlignmentPadding = 0;
+const int kAlignmentPaddingPushed = 2;
+const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
+
// ----------------------------------------------------
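
Why 0x12345678 is a safe zap value: with V8's pointer tagging, heap-object pointers carry a set low bit, so an even word on the stack reads as a small integer and the GC never tries to follow it. Checked directly:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kAlignmentZapValue = 0x12345678;
      const uint32_t kHeapObjectTag = 1;  // Low bit set => heap pointer.
      bool looks_like_heap_object =
          (kAlignmentZapValue & kHeapObjectTag) == kHeapObjectTag;
      std::printf("treated as a heap pointer: %d\n", looks_like_heap_object);
      return 0;
    }
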
@@ -119,6 +123,8 @@
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
+
+ static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 9727ea0..5a513fd 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -782,10 +782,10 @@
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
+ : isolate()->factory()->undefined_value(), zone());
break;
case Variable::PARAMETER:
@@ -840,12 +840,12 @@
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
+ globals_->Add(function, zone());
break;
}
@@ -896,8 +896,8 @@
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
@@ -1557,7 +1557,7 @@
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -4476,14 +4476,49 @@
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
__ push(edx);
+
// Store result register while executing finally block.
__ push(result_register());
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_obj));
+ __ push(edx);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(edx, Operand::StaticVariable(has_pending_message));
+ __ push(edx);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_script));
+ __ push(edx);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
+ // Restore pending message from stack.
+ __ pop(edx);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(Operand::StaticVariable(pending_message_script), edx);
+
+ __ pop(edx);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(Operand::StaticVariable(has_pending_message), edx);
+
+ __ pop(edx);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(Operand::StaticVariable(pending_message_obj), edx);
+
+ // Restore result register from stack.
__ pop(result_register());
+
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
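
Enter/ExitFinallyBlock form a push/pop mirror: the result register first, then the pending message object, flag, and script; the exit path pops in exactly the reverse order, so the finally body can clobber the per-isolate message state without losing a propagating exception. The protocol, simulated with an explicit stack (toy types, not the real ExternalReference machinery):

    #include <cassert>
    #include <stack>

    struct PendingMessage { int obj, has, script; };

    int main() {
      std::stack<int> frame;
      PendingMessage live = {7, 1, 3};
      int result_register = 42;
      // Enter: result first, then obj, has, script (push order matters).
      frame.push(result_register);
      frame.push(live.obj); frame.push(live.has); frame.push(live.script);
      live = {0, 0, 0};                       // Finally body clobbers state.
      // Exit: mirror image -- script, has, obj, then the result register.
      live.script = frame.top(); frame.pop();
      live.has = frame.top(); frame.pop();
      live.obj = frame.top(); frame.pop();
      result_register = frame.top(); frame.pop();
      assert(live.obj == 7 && live.has == 1 && live.script == 3);
      assert(result_register == 42);
      return 0;
    }
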
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 7966e19..7fd64ca 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -79,6 +79,10 @@
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ info()->osr_ast_id() != AstNode::kNoNumber;
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
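
The predicate in isolation, with illustrative names. The reading that double-heavy, non-recursive code benefits from an aligned frame, while recursive code would re-pad on every activation, and that OSR entries always need the alignment slot, is an interpretation of this condition, not something the patch states:

    #include <cstdio>

    bool ShouldAlignFrame(int num_double_slots, bool is_recursive,
                          bool is_osr) {
      return (num_double_slots > 2 && !is_recursive) || is_osr;
    }

    int main() {
      std::printf("%d %d %d\n",
                  ShouldAlignFrame(3, false, false),   // 1: worth aligning.
                  ShouldAlignFrame(3, true, false),    // 0: recursive.
                  ShouldAlignFrame(0, false, true));   // 1: OSR entry.
      return 0;
    }
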
@@ -153,14 +157,52 @@
__ bind(&ok);
}
+
+ if (dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ __ test(esp, Immediate(kPointerSize));
+ __ Assert(zero, "frame is expected to be aligned");
+ }
+
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- if (slots > 0) {
+ ASSERT_GE(slots, 1);
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
+ } else {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
Label loop;
@@ -170,7 +212,7 @@
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
+ #ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
@@ -180,7 +222,18 @@
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
-#endif
+ #endif
+ }
+
+ // Store dynamic frame alignment state in the first local.
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ edx);
+ } else {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ Immediate(kNoAlignmentPadding));
}
}
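
The prologue above pads the stack so that esp + 4 is a multiple of 2 * kPointerSize before the frame is built: when padding is needed, one word is pushed, the arguments, receiver, and return address are shifted down a slot, the vacated slot is marked with kAlignmentZapValue, and the decision is recorded in edx and later stored in the first local. A standalone sketch of that shuffle over a toy frame (the zap value below is made up):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  constexpr int kPointerSize = 4;
  constexpr uint32_t kAlignmentZapValue = 0xBAADF00D;  // illustrative marker

  // frame holds, from the stack top downwards: return address, receiver,
  // then num_parameters arguments. Returns true when padding was pushed.
  bool PadFrame(std::vector<uint32_t>* frame, uintptr_t sp,
                int num_parameters) {
    if ((sp & kPointerSize) != 0) return false;   // already aligned: do_not_pad
    frame->insert(frame->begin(), 0u);            // push Immediate(0)
    int words_to_copy = num_parameters + 2;       // args, receiver, return addr
    for (int i = 0; i < words_to_copy; ++i)       // the align_loop
      (*frame)[i] = (*frame)[i + 1];
    (*frame)[words_to_copy] = kAlignmentZapValue; // mark the vacated slot
    return true;                                  // kAlignmentPaddingPushed
  }

  int main() {
    std::vector<uint32_t> frame = {0x1111, 0x2222, 7, 8};  // ret, this, args
    assert(PadFrame(&frame, 0x1000, 2));  // 0x1000 + 4 is not 8-byte aligned
    assert(frame.size() == 5 && frame[4] == kAlignmentZapValue);
    assert(frame[0] == 0x1111 && frame[3] == 8);
    return 0;
  }
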
@@ -544,7 +597,7 @@
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
@@ -566,19 +619,22 @@
__ push(eax);
__ push(ebx);
__ mov(ebx, shared);
- __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
+ __ mov(eax,
+ FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
__ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+ __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
+ eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+ __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
+ eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
@@ -639,7 +695,7 @@
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -686,7 +742,7 @@
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
}
@@ -699,7 +755,7 @@
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -1985,7 +2041,7 @@
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->InputAt(1));
@@ -2098,8 +2154,25 @@
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (dynamic_frame_alignment_) {
+ // Fetch the state of the dynamic frame alignment.
+ __ mov(edx, Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+ }
__ mov(esp, ebp);
__ pop(ebp);
+ if (dynamic_frame_alignment_) {
+ Label no_padding;
+ __ cmp(edx, Immediate(kNoAlignmentPadding));
+ __ j(equal, &no_padding);
+ if (FLAG_debug_code) {
+ __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, "expected alignment marker");
+ }
+ __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+ __ bind(&no_padding);
+ }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@@ -2229,12 +2302,12 @@
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsFound() && lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2246,9 +2319,23 @@
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ HeapObject* current = HeapObject::cast((*type)->prototype());
+ Heap* heap = type->GetHeap();
+ while (current != heap->null_value()) {
+ Handle<HeapObject> link(current);
+ __ LoadHeapObject(result, link);
+ __ cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Handle<Map>(JSObject::cast(current)->map()));
+ DeoptimizeIf(not_equal, env);
+ current = HeapObject::cast(current->map()->prototype());
+ }
+ __ mov(result, factory()->undefined_value());
}
}
@@ -2270,6 +2357,18 @@
}
+// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
+// prototype chain, which causes unbounded code generation.
+static bool CompactEmit(
+ SmallMapList* list, Handle<String> name, int i, Isolate* isolate) {
+ LookupResult lookup(isolate);
+ Handle<Map> map = list->at(i);
+ map->LookupInDescriptors(NULL, *name, &lookup);
+ return lookup.IsFound() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION);
+}
+
+
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
@@ -2283,18 +2382,32 @@
}
Handle<String> name = instr->hydrogen()->name();
Label done;
+ bool all_are_compact = true;
+ for (int i = 0; i < map_count; ++i) {
+ if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
+ all_are_compact = false;
+ break;
+ }
+ }
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ Label check_passed;
+ __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
- __ j(not_equal, &next, Label::kNear);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- __ jmp(&done, Label::kNear);
+ bool compact = all_are_compact ? true :
+ CompactEmit(instr->hydrogen()->types(), name, i, isolate());
+ __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
+ __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
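
When the polymorphic load cannot resolve the property as a field or constant function, the new negative-lookup path emits a map check for every prototype on the chain and deoptimizes if any of them changed, then loads undefined; CompactEmit exists because that chain walk makes the emitted code unbounded, so branch distances must be chosen per case. A standalone model of the chain checks (toy Object/Map types; the receiver's own map is assumed to have been checked already, as CompareMap does above):

  #include <cassert>
  #include <vector>

  struct Map {};
  struct Object { const Map* map; const Object* prototype; };

  // Returns false ("deoptimize") if any prototype's map changed since the
  // chain was recorded at compile time.
  bool CheckPrototypeMaps(const Object* receiver,
                          const std::vector<const Map*>& expected) {
    const Object* current = receiver->prototype;
    for (const Map* expected_map : expected) {
      if (current == nullptr || current->map != expected_map) return false;
      current = current->prototype;
    }
    return true;  // the load result is undefined, but no deopt is required
  }

  int main() {
    Map proto_map, top_map;
    Object top{&top_map, nullptr};
    Object proto{&proto_map, &top};
    Object receiver{nullptr, &proto};
    assert(CheckPrototypeMaps(&receiver, {&proto_map, &top_map}));
    Map changed;
    proto.map = &changed;  // a prototype object was modified
    assert(!CheckPrototypeMaps(&receiver, {&proto_map, &top_map}));
    return 0;
  }
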
@@ -2432,8 +2545,13 @@
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
}
}
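
The hole check above exploits tagging: in a fast smi-only backing store every valid element is a smi (low tag bit clear), while the hole is a heap object pointer (low bit set), so a single tag test replaces the compare against the hole value. A standalone illustration of the 32-bit tagging assumption:

  #include <cassert>
  #include <cstdint>

  constexpr uint32_t kSmiTagMask = 1;  // low tag bit; 0 means smi on ia32

  bool IsSmi(uint32_t tagged) { return (tagged & kSmiTagMask) == 0; }

  int main() {
    uint32_t smi_42 = 42u << 1;       // smis carry the value above the tag
    uint32_t heap_ptr = 0x08041231u;  // heap objects (incl. the hole) are odd
    assert(IsSmi(smi_42));
    assert(!IsSmi(heap_ptr));         // so one tag test detects the hole
    return 0;
  }
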
@@ -2908,7 +3026,7 @@
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -3110,7 +3228,7 @@
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
@@ -3573,18 +3691,25 @@
ElementsKind to_kind = to_map->elements_kind();
Label not_applicable;
+ bool is_simple_map_transition =
+ IsSimpleMapChangeTransition(from_kind, to_kind);
+ Label::Distance branch_distance =
+ is_simple_map_transition ? Label::kNear : Label::kFar;
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable);
- __ mov(new_map_reg, to_map);
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ __ j(not_equal, &not_applicable, branch_distance);
+ if (is_simple_map_transition) {
Register object_reg = ToRegister(instr->object());
- __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ Handle<Map> map = instr->hydrogen()->transitioned_map();
+ __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
+ Immediate(map));
// Write barrier.
ASSERT_NE(instr->temp_reg(), NULL);
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+ __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+ ToRegister(instr->temp_reg()),
+ kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
+ __ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
@@ -3593,6 +3718,7 @@
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
+ __ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
@@ -3618,7 +3744,7 @@
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -3674,7 +3800,7 @@
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3749,7 +3875,7 @@
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
@@ -3817,7 +3943,7 @@
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->TempAt(0));
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
} else {
@@ -3863,6 +3989,10 @@
if (instr->needs_check()) {
__ test(ToRegister(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(ToRegister(input));
+ }
}
__ SmiUntag(ToRegister(input));
}
@@ -4017,7 +4147,7 @@
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -4358,7 +4488,8 @@
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
@@ -4973,7 +5104,7 @@
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 28facc5..b241aaf 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -54,19 +54,20 @@
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, zone),
+ deoptimization_literals_(8, zone),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(zone),
- deferred_(8),
+ deferred_(8, zone),
+ dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
+ zone_(zone),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- zone_(zone) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -169,7 +170,7 @@
void Abort(const char* format, ...);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -313,7 +314,8 @@
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -342,6 +344,7 @@
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
+ bool dynamic_frame_alignment_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
@@ -349,13 +352,13 @@
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+ Zone* zone_;
+
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
- Zone* zone_;
-
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 510d9f1..6428916 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -37,7 +37,7 @@
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
- moves_(32),
+ moves_(32, owner->zone()),
source_uses_(),
destination_uses_(),
spilled_register_(-1) {}
@@ -157,7 +157,7 @@
LOperand* destination = move.destination();
if (destination->IsRegister()) ++destination_uses_[destination->index()];
- moves_.Add(move);
+ moves_.Add(move, cgen_->zone());
}
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 26af236..60366f7 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -368,7 +368,11 @@
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot when allocating a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (is_double) {
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ }
return spill_slot_count_++;
}
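
GetNextSpillIndex now forces every double spill slot onto an odd index: one slot is skipped for the double's second word and the count is rounded with |= 1, which together with the word-sized alignment-state slot reserved at index 0 (see the LChunkBuilder hunk below) keeps doubles 8-byte aligned once the frame itself is aligned. A standalone model of the allocator:

  #include <cassert>

  struct SpillAllocator {
    int spill_slot_count = 0;
    int num_double_slots = 0;
    int GetNextSpillIndex(bool is_double) {
      if (is_double) {
        spill_slot_count++;     // skip a slot for the double's second word
        spill_slot_count |= 1;  // force an odd index so the pair is aligned
        num_double_slots++;
      }
      return spill_slot_count++;
    }
  };

  int main() {
    SpillAllocator a;
    assert(a.GetNextSpillIndex(false) == 0);  // the alignment-state slot
    assert(a.GetNextSpillIndex(true) % 2 == 1);
    assert(a.GetNextSpillIndex(true) % 2 == 1);
    assert(a.num_double_slots == 2);
    return 0;
  }
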
@@ -376,9 +380,9 @@
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
+ return LStackSlot::Create(index, zone());
}
}
@@ -474,23 +478,23 @@
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
- instructions_.Add(gap);
+ instructions_.Add(gap, zone());
index = instructions_.length();
- instructions_.Add(instr);
+ instructions_.Add(instr, zone());
} else {
index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
+ instructions_.Add(instr, zone());
+ instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
+ pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
+ return LConstantOperand::Create(constant->id(), zone());
}
@@ -529,7 +533,8 @@
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+ GetGapAt(index)->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(from, to, zone());
}
@@ -549,6 +554,12 @@
chunk_ = new(zone()) LChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // Reserve the first spill slot for the state of dynamic alignment.
+ int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -764,7 +775,7 @@
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -1545,7 +1556,7 @@
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
- return new LIsStringAndBranch(UseRegister(instr->value()), temp);
+ return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
}
@@ -1571,7 +1582,7 @@
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LStringCompareAndBranch* result = new
+ LStringCompareAndBranch* result = new(zone())
LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
@@ -1696,8 +1707,9 @@
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- if (needs_check) {
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+ } else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
(truncating && CpuFeatures::IsSupported(SSE3))
@@ -1705,8 +1717,6 @@
: FixedTemp(xmm1);
LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 601c9c2..cd20631 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -327,8 +327,10 @@
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -2305,11 +2307,12 @@
public:
LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ num_double_slots_(0),
info_(info),
graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) { }
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
@@ -2322,6 +2325,7 @@
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ int num_double_slots() const { return num_double_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2354,11 +2358,14 @@
}
void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
+ inlined_closures_.Add(closure, zone());
}
+ Zone* zone() const { return graph_->zone(); }
+
private:
int spill_slot_count_;
+ int num_double_slots_;
CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
@@ -2403,7 +2410,7 @@
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 1b61486..2012a5a 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -237,6 +237,68 @@
}
+void MacroAssembler::RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2,
+ SaveFPRegsMode save_fp) {
+ Label done;
+
+ Register address = scratch1;
+ Register value = scratch2;
+ if (emit_debug_code()) {
+ Label ok;
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ test_b(address, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ if (emit_debug_code()) {
+ AbortIfSmi(object);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // A single check of the map page's interesting flag suffices, since the
+ // flag is only set during incremental collection, and then the from object's
+ // page's interesting flag is guaranteed to be set as well. This optimization
+ // relies on the fact that maps can never be in new space.
+ ASSERT(!isolate()->heap()->InNewSpace(*map));
+ CheckPageFlagForMap(map,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ // Delay the initialization of |address| and |value| for the stub until it's
+ // known that they will be needed. Up until this point their values are not
+ // needed since they are embedded in the operands of instructions that need
+ // them.
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ mov(value, Immediate(map));
+ RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value,
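
RecordWriteForMap relies on two facts called out in its comments: the barrier only matters while incremental marking is on, and maps never live in new space, so a single check of the map page's interesting flag decides whether the stub must run. A standalone model of that fast path (the Page type and flag name below are stand-ins for V8's MemoryChunk flags):

  #include <cassert>

  struct Page { bool pointers_to_here_are_interesting = false; };

  bool FLAG_incremental_marking = true;

  // Returns true when the RecordWrite stub has to run for a map store.
  bool MapWriteNeedsBarrier(const Page& map_page) {
    if (!FLAG_incremental_marking) return false;
    // Maps never live in new space, so checking the map page's flag alone
    // is enough; the host object's page needs no check here.
    return map_page.pointers_to_here_are_interesting;
  }

  int main() {
    Page map_page;
    assert(!MapWriteNeedsBarrier(map_page));            // flag clear: skip
    map_page.pointers_to_here_are_interesting = true;   // set while marking
    assert(MapWriteNeedsBarrier(map_page));
    return 0;
  }
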
@@ -504,7 +566,7 @@
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
+ current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
@@ -2618,6 +2680,28 @@
}
+void MacroAssembler::CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ Page* page = Page::FromAddress(map->address());
+ ExternalReference reference(ExternalReference::page_flags(page));
+ // The inlined static address check of the page's flags relies
+ // on maps never being compacted.
+ ASSERT(!isolate()->heap()->mark_compact_collector()->
+ IsOnEvacuationCandidate(*map));
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+ } else {
+ test(Operand::StaticVariable(reference), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index c71cad8..5c7a6d6 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -90,6 +90,13 @@
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -194,6 +201,16 @@
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
+ // For page containing |object| mark the region covering the object's map
+ // dirty. |object| is the object being stored into, |map| is the Map object
+ // that was stored.
+ void RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2,
+ SaveFPRegsMode save_fp);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 9731dac..07782cc 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -101,8 +101,10 @@
RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index f135b0f..760fadc 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -44,7 +44,7 @@
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerIA32();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 4964c02..0e4ce20 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -745,10 +745,22 @@
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
@@ -757,7 +769,32 @@
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+ // We need an extra register, so push name_reg and reuse it as a scratch.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
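
The new store guard above walks to the holder (or, when the lookup found nothing, to the top of the prototype chain) so that CheckPrototypes can cover the whole chain before a transitioning store is allowed. A standalone sketch of the "find the top object" walk (toy JSObject type):

  #include <cassert>

  struct JSObject { const JSObject* prototype = nullptr; };

  // Walk to the last JSObject before the chain ends, mirroring the
  // do/while loop over GetPrototype() in the hunk above.
  const JSObject* FindTopOfChain(const JSObject* object) {
    const JSObject* holder = object;
    while (holder->prototype != nullptr)
      holder = holder->prototype;
    return holder;
  }

  int main() {
    JSObject top;
    JSObject middle{&top};
    JSObject bottom{&middle};
    assert(FindTopOfChain(&bottom) == &top);
    return 0;
  }
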
@@ -768,11 +805,11 @@
if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ pop(scratch); // Return address.
+ __ pop(scratch1); // Return address.
__ push(receiver_reg);
__ push(Immediate(transition));
__ push(eax);
- __ push(scratch);
+ __ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
@@ -783,14 +820,14 @@
if (!transition.is_null()) {
// Update the map of the object.
- __ mov(scratch, Immediate(transition));
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch);
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
- scratch,
+ scratch1,
name_reg,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -813,19 +850,19 @@
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch, offset), eax);
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch1, offset), eax);
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, eax);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -1124,8 +1161,9 @@
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -2484,8 +2522,13 @@
Label miss;
// Generate store field code. Trashes the name register.
- GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
-
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ edx, ecx, ebx, edi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
__ mov(ecx, Immediate(name)); // restore name
@@ -2544,6 +2587,52 @@
}
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ Handle<String> name) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(eax);
+
+ // Call the JavaScript setter with the receiver and the value on the stack.
+ __ push(edx);
+ __ push(eax);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> receiver,
Handle<String> name) {
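
CompileStoreViaSetter must make the stub produce the assigned value, not whatever the JavaScript setter returns, which is why eax is pushed before the call and popped afterwards. A standalone model of that convention (std::function stands in for the invoked setter function):

  #include <cassert>
  #include <functional>

  // An assignment expression evaluates to the assigned value, so the value
  // is saved across the setter call and restored afterwards.
  int StoreViaSetter(const std::function<int(int)>& setter, int value) {
    int saved = value;    // push(eax)
    (void)setter(value);  // InvokeFunction: setter's return value is ignored
    return saved;         // pop(eax): the assignment's result
  }

  int main() {
    int backing = 0;
    auto setter = [&](int v) { backing = v; return -1; };
    assert(StoreViaSetter(setter, 42) == 42);  // not -1
    assert(backing == 42);
    return 0;
  }
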
@@ -2658,7 +2747,13 @@
__ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
- GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ edx, ecx, ebx, edi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2811,6 +2906,44 @@
}
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(edx);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
diff --git a/src/ic.cc b/src/ic.cc
index 134ef8b..d7f0f32 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -988,13 +988,25 @@
}
break;
case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject());
- if (!callback_object->IsAccessorInfo()) return;
- Handle<AccessorInfo> callback =
- Handle<AccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) return;
- code = isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, callback);
+ Handle<Object> callback(lookup->GetCallbackObject());
+ if (callback->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->getter()) == 0) return;
+ if (!info->IsCompatibleReceiver(*receiver)) return;
+ code = isolate()->stub_cache()->ComputeLoadCallback(
+ name, receiver, holder, info);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter());
+ if (!getter->IsJSFunction()) return;
+ if (holder->IsGlobalObject()) return;
+ if (!receiver->HasFastProperties()) return;
+ code = isolate()->stub_cache()->ComputeLoadViaGetter(
+ name, receiver, holder, Handle<JSFunction>::cast(getter));
+ } else {
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
+ return;
+ }
break;
}
case INTERCEPTOR:
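
The rewritten CALLBACKS case dispatches on the kind of callback: an AccessorInfo needs a compatible receiver (the new AccessorSignature check), an AccessorPair is only eligible when its getter is a JS function on a fast-property, non-global holder, and foreign old-style native accessors never get an IC. A standalone model of that decision (enum and field names are illustrative):

  #include <cassert>

  enum class CallbackKind { kAccessorInfo, kAccessorPair, kForeign };

  struct Callback {
    CallbackKind kind;
    bool compatible_receiver;    // AccessorInfo: signature check passes
    bool getter_is_js_function;  // AccessorPair
  };

  bool CanUseLoadIC(const Callback& cb, bool holder_is_global,
                    bool receiver_has_fast_properties) {
    switch (cb.kind) {
      case CallbackKind::kAccessorInfo:
        return cb.compatible_receiver;
      case CallbackKind::kAccessorPair:
        return cb.getter_is_js_function && !holder_is_global &&
               receiver_has_fast_properties;
      case CallbackKind::kForeign:
        return false;  // no IC support for old-style native accessors
    }
    return false;
  }

  int main() {
    Callback pair{CallbackKind::kAccessorPair, false, true};
    assert(CanUseLoadIC(pair, false, true));
    assert(!CanUseLoadIC(pair, true, true));  // global holders stay generic
    return 0;
  }
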
@@ -1256,6 +1268,7 @@
Handle<AccessorInfo> callback =
Handle<AccessorInfo>::cast(callback_object);
if (v8::ToCData<Address>(callback->getter()) == 0) return;
+ if (!callback->IsCompatibleReceiver(*receiver)) return;
code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
name, receiver, holder, callback);
break;
@@ -1408,7 +1421,11 @@
}
// Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ return receiver->SetProperty(*name,
+ *value,
+ NONE,
+ strict_mode,
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -1433,6 +1450,7 @@
// Compute the code stub for this store; used for rewriting to
// monomorphic state and making sure that the code stub is in the
// stub cache.
+ Handle<JSObject> holder(lookup->holder());
Handle<Code> code;
switch (type) {
case FIELD:
@@ -1460,18 +1478,30 @@
code = isolate()->stub_cache()->ComputeStoreGlobal(
name, global, cell, strict_mode);
} else {
- if (lookup->holder() != *receiver) return;
+ if (!holder.is_identical_to(receiver)) return;
code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
}
break;
case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject());
- if (!callback_object->IsAccessorInfo()) return;
- Handle<AccessorInfo> callback =
- Handle<AccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->setter()) == 0) return;
- code = isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, callback, strict_mode);
+ Handle<Object> callback(lookup->GetCallbackObject());
+ if (callback->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->setter()) == 0) return;
+ ASSERT(info->IsCompatibleReceiver(*receiver));
+ code = isolate()->stub_cache()->ComputeStoreCallback(
+ name, receiver, info, strict_mode);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter());
+ if (!setter->IsJSFunction()) return;
+ if (holder->IsGlobalObject()) return;
+ if (!receiver->HasFastProperties()) return;
+ code = isolate()->stub_cache()->ComputeStoreViaSetter(
+ name, receiver, Handle<JSFunction>::cast(setter), strict_mode);
+ } else {
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
+ return;
+ }
break;
}
case INTERCEPTOR:
@@ -1481,7 +1511,6 @@
break;
case CONSTANT_FUNCTION:
case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
return;
case HANDLER:
case NULL_DESCRIPTOR:
@@ -1563,38 +1592,43 @@
}
bool monomorphic = false;
+ bool is_transition_stub = IsTransitionStubKind(stub_kind);
+ Handle<Map> receiver_map(receiver->map());
+ Handle<Map> monomorphic_map = receiver_map;
MapHandleList target_receiver_maps;
- if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) {
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
+ // yet will do so and stay there.
+ monomorphic = true;
+ } else {
GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
- }
- if (!IsTransitionStubKind(stub_kind)) {
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- monomorphic = true;
- } else {
- if (ic_state == MONOMORPHIC) {
- // The first time a receiver is seen that is a transitioned version of
- // the previous monomorphic receiver type, assume the new ElementsKind
- // is the monomorphic type. This benefits global arrays that only
- // transition once, and all call sites accessing them are faster if they
- // remain monomorphic. If this optimistic assumption is not true, the IC
- // will miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- monomorphic = IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind());
- }
+ if (ic_state == MONOMORPHIC && is_transition_stub) {
+ // The first time a receiver is seen that is a transitioned version of the
+ // previous monomorphic receiver type, assume the new ElementsKind is the
+ // monomorphic type. This benefits global arrays that only transition
+ // once, and all call sites accessing them are faster if they remain
+ // monomorphic. If this optimistic assumption is not true, the IC will
+ // miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ monomorphic = IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind());
}
}
if (monomorphic) {
+ if (is_transition_stub) {
+ monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
+ ASSERT(*monomorphic_map != *receiver_map);
+ stub_kind = GetNoTransitionStubKind(stub_kind);
+ }
return ComputeMonomorphicStub(
- receiver, stub_kind, strict_mode, generic_stub);
+ monomorphic_map, stub_kind, strict_mode, generic_stub);
}
ASSERT(target() != *generic_stub);
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
- Handle<Map> receiver_map(receiver->map());
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
if (IsTransitionStubKind(stub_kind)) {
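
The keyed IC now decides monomorphism up front: sites that have not yet reached MONOMORPHIC are optimistically kept monomorphic, and a monomorphic site hit with a transitioned map stays monomorphic when the new ElementsKind is more general; only then does the transitioned map replace the receiver map and the stub kind drop its transition. A standalone model of the decision:

  #include <cassert>

  enum IcState { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, POLYMORPHIC };

  bool StayMonomorphic(IcState state, bool is_transition_stub,
                       bool new_kind_is_more_general) {
    // Optimistically assume young ICs will settle on one map.
    if (state == UNINITIALIZED || state == PREMONOMORPHIC) return true;
    if (state == MONOMORPHIC && is_transition_stub)
      return new_kind_is_more_general;
    return false;  // fall through to the polymorphic path
  }

  int main() {
    assert(StayMonomorphic(PREMONOMORPHIC, false, false));
    assert(StayMonomorphic(MONOMORPHIC, true, true));   // e.g. SMI -> DOUBLE
    assert(!StayMonomorphic(MONOMORPHIC, true, false)); // go polymorphic
    return 0;
  }
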
@@ -1655,16 +1689,16 @@
}
-Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
+Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<Map> receiver_map,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> generic_stub) {
- if (receiver->HasFastSmiOrObjectElements() ||
- receiver->HasExternalArrayElements() ||
- receiver->HasFastDoubleElements() ||
- receiver->HasDictionaryElements()) {
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsDictionaryElementsKind(elements_kind)) {
return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
- receiver, stub_kind, strict_mode);
+ receiver_map, stub_kind, strict_mode);
} else {
return generic_stub;
}
@@ -1940,7 +1974,6 @@
case CALLBACKS:
case INTERCEPTOR:
case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
code = (strict_mode == kStrictMode)
diff --git a/src/ic.h b/src/ic.h
index c1b9549..c86f316 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -451,7 +451,7 @@
private:
void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
- Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicStub(Handle<Map> receiver_map,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> default_stub);
@@ -467,6 +467,12 @@
static bool IsGrowStubKind(StubKind stub_kind) {
return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
}
+
+ static StubKind GetNoTransitionStubKind(StubKind stub_kind) {
+ if (!IsTransitionStubKind(stub_kind)) return stub_kind;
+ if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION;
+ return STORE_NO_TRANSITION;
+ }
};
diff --git a/src/interface.cc b/src/interface.cc
index 7836110..86bb9d0 100644
--- a/src/interface.cc
+++ b/src/interface.cc
@@ -41,11 +41,13 @@
}
-Interface* Interface::Lookup(Handle<String> name) {
+Interface* Interface::Lookup(Handle<String> name, Zone* zone) {
ASSERT(IsModule());
ZoneHashMap* map = Chase()->exports_;
if (map == NULL) return NULL;
- ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false);
+ ZoneAllocationPolicy allocator(zone);
+ ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false,
+ allocator);
if (p == NULL) return NULL;
ASSERT(*static_cast<String**>(p->key) == *name);
ASSERT(p->value != NULL);
@@ -69,7 +71,7 @@
void Interface::DoAdd(
- void* name, uint32_t hash, Interface* interface, bool* ok) {
+ void* name, uint32_t hash, Interface* interface, Zone* zone, bool* ok) {
MakeModule(ok);
if (!*ok) return;
@@ -85,9 +87,13 @@
#endif
ZoneHashMap** map = &Chase()->exports_;
- if (*map == NULL) *map = new ZoneHashMap(Match, 8);
+ ZoneAllocationPolicy allocator(zone);
- ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen());
+ if (*map == NULL)
+ *map = new ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity,
+ allocator);
+
+ ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator);
if (p == NULL) {
// This didn't have name but was frozen already, that's an error.
*ok = false;
@@ -97,7 +103,7 @@
#ifdef DEBUG
Nesting nested;
#endif
- static_cast<Interface*>(p->value)->Unify(interface, ok);
+ static_cast<Interface*>(p->value)->Unify(interface, zone, ok);
}
#ifdef DEBUG
@@ -110,9 +116,9 @@
}
-void Interface::Unify(Interface* that, bool* ok) {
- if (this->forward_) return this->Chase()->Unify(that, ok);
- if (that->forward_) return this->Unify(that->Chase(), ok);
+void Interface::Unify(Interface* that, Zone* zone, bool* ok) {
+ if (this->forward_) return this->Chase()->Unify(that, zone, ok);
+ if (that->forward_) return this->Unify(that->Chase(), zone, ok);
ASSERT(this->forward_ == NULL);
ASSERT(that->forward_ == NULL);
@@ -134,9 +140,9 @@
// Merge the smaller interface into the larger, for performance.
if (this->exports_ != NULL && (that->exports_ == NULL ||
this->exports_->occupancy() >= that->exports_->occupancy())) {
- this->DoUnify(that, ok);
+ this->DoUnify(that, ok, zone);
} else {
- that->DoUnify(this, ok);
+ that->DoUnify(this, ok, zone);
}
#ifdef DEBUG
@@ -151,7 +157,7 @@
}
-void Interface::DoUnify(Interface* that, bool* ok) {
+void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
ASSERT(this->forward_ == NULL);
ASSERT(that->forward_ == NULL);
ASSERT(!this->IsValue());
@@ -166,7 +172,7 @@
ZoneHashMap* map = that->exports_;
if (map != NULL) {
for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
- this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), ok);
+ this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), zone, ok);
if (!*ok) return;
}
}
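
Unify merges the smaller export map into the larger one so the cost is bounded by the smaller interface, after which both interfaces denote the same type. A standalone model of that ordering (the per-name conflict unification that DoAdd performs recursively is omitted here):

  #include <algorithm>
  #include <cassert>
  #include <map>
  #include <string>

  using Exports = std::map<std::string, int>;

  void Unify(Exports* self, Exports* that) {
    if (self->size() < that->size()) std::swap(self, that);  // self: larger
    for (const auto& entry : *that) self->insert(entry);     // DoAdd per name
    *that = *self;  // models forwarding both interfaces to one representation
  }

  int main() {
    Exports big{{"x", 1}, {"y", 2}};
    Exports small{{"y", 2}, {"z", 3}};
    Unify(&big, &small);
    assert(big.size() == 3 && small.size() == 3);
    return 0;
  }
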
diff --git a/src/interface.h b/src/interface.h
index 580f082..2670e74 100644
--- a/src/interface.h
+++ b/src/interface.h
@@ -53,12 +53,12 @@
return &value_interface;
}
- static Interface* NewUnknown() {
- return new Interface(NONE);
+ static Interface* NewUnknown(Zone* zone) {
+ return new(zone) Interface(NONE);
}
- static Interface* NewModule() {
- return new Interface(MODULE);
+ static Interface* NewModule(Zone* zone) {
+ return new(zone) Interface(MODULE);
}
// ---------------------------------------------------------------------------
@@ -66,13 +66,13 @@
// Add a name to the list of exports. If it already exists, unify with
// interface, otherwise insert unless this is closed.
- void Add(Handle<String> name, Interface* interface, bool* ok) {
- DoAdd(name.location(), name->Hash(), interface, ok);
+ void Add(Handle<String> name, Interface* interface, Zone* zone, bool* ok) {
+ DoAdd(name.location(), name->Hash(), interface, zone, ok);
}
// Unify with another interface. If successful, both interface objects will
// represent the same type, and changes to one are reflected in the other.
- void Unify(Interface* that, bool* ok);
+ void Unify(Interface* that, Zone* zone, bool* ok);
// Determine this interface to be a value interface.
void MakeValue(bool* ok) {
@@ -116,7 +116,7 @@
Handle<JSModule> Instance() { return Chase()->instance_; }
// Look up an exported name. Returns NULL if not (yet) defined.
- Interface* Lookup(Handle<String> name);
+ Interface* Lookup(Handle<String> name, Zone* zone);
// ---------------------------------------------------------------------------
// Iterators.
@@ -187,8 +187,9 @@
return result;
}
- void DoAdd(void* name, uint32_t hash, Interface* interface, bool* ok);
- void DoUnify(Interface* that, bool* ok);
+ void DoAdd(void* name, uint32_t hash, Interface* interface, Zone* zone,
+ bool* ok);
+ void DoUnify(Interface* that, bool* ok, Zone* zone);
};
} } // namespace v8::internal
diff --git a/src/isolate.cc b/src/isolate.cc
index 6fcc926..8fcb370 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -921,7 +921,7 @@
}
-Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
+Failure* Isolate::ReThrow(MaybeObject* exception) {
bool can_be_caught_externally = false;
bool catchable_by_javascript = is_catchable_by_javascript(exception);
ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
@@ -1778,7 +1778,7 @@
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper();
handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this);
+ stub_cache_ = new StubCache(this, zone());
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
diff --git a/src/isolate.h b/src/isolate.h
index 9aa1242..5ca2b87 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -578,6 +578,20 @@
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
}
+
+ Address pending_message_obj_address() {
+ return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
+ }
+
+ Address has_pending_message_address() {
+ return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
+ }
+
+ Address pending_message_script_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.pending_message_script_);
+ }
+
MaybeObject* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_top_.scheduled_exception_;
@@ -708,7 +722,7 @@
// Re-throw an exception. This involves no error reporting since
// error reporting was handled when the exception was thrown
// originally.
- Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
+ Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
Failure* ThrowIllegalOperation();
@@ -1394,7 +1408,6 @@
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
-#define ZONE (v8::internal::Isolate::Current()->zone())
#define LOGGER (v8::internal::Isolate::Current()->logger())
diff --git a/src/json-parser.h b/src/json-parser.h
index d22cd0d..7265165 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -43,15 +43,15 @@
template <bool seq_ascii>
class JsonParser BASE_EMBEDDED {
public:
- static Handle<Object> Parse(Handle<String> source) {
- return JsonParser().ParseJson(source);
+ static Handle<Object> Parse(Handle<String> source, Zone* zone) {
+ return JsonParser().ParseJson(source, zone);
}
static const int kEndOfString = -1;
private:
// Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> source);
+ Handle<Object> ParseJson(Handle<String> source, Zone* zone);
inline void Advance() {
position_++;
@@ -149,6 +149,7 @@
}
inline Isolate* isolate() { return isolate_; }
+ inline Zone* zone() const { return zone_; }
static const int kInitialSpecialStringLength = 1024;
@@ -161,11 +162,14 @@
Isolate* isolate_;
uc32 c0_;
int position_;
+ Zone* zone_;
};
template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
+Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
+ Zone* zone) {
isolate_ = source->map()->GetHeap()->isolate();
+ zone_ = zone;
FlattenString(source);
source_ = source;
source_length_ = source_->length();
@@ -323,7 +327,7 @@
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
- ZoneList<Handle<Object> > elements(4);
+ ZoneList<Handle<Object> > elements(4, zone());
ASSERT_EQ(c0_, '[');
AdvanceSkipWhitespace();
@@ -331,7 +335,7 @@
do {
Handle<Object> element = ParseJsonValue();
if (element.is_null()) return ReportUnexpectedCharacter();
- elements.Add(element);
+ elements.Add(element, zone());
} while (MatchSkipWhiteSpace(','));
if (c0_ != ']') {
return ReportUnexpectedCharacter();
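
The pervasive Zone* threading in this patch replaces the removed global ZONE macro (see the isolate.h hunk above): a zone is a bump allocator whose memory is released all at once, so every container must be told which zone it allocates from. A standalone model of the API shape, valid for trivially copyable element types:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  class Zone {
   public:
    void* Allocate(size_t size) {
      segments_.emplace_back(size);  // all freed together when the Zone dies
      return segments_.back().data();
    }
   private:
    std::vector<std::vector<char>> segments_;
  };

  template <typename T>
  class ZoneList {
   public:
    ZoneList(int capacity, Zone* zone)
        : data_(static_cast<T*>(zone->Allocate(capacity * sizeof(T)))),
          capacity_(capacity),
          length_(0) {}
    // Like the patched API, Add takes the zone explicitly.
    void Add(const T& value, Zone* zone) {
      if (length_ == capacity_) {
        T* grown = static_cast<T*>(zone->Allocate(2 * capacity_ * sizeof(T)));
        for (int i = 0; i < length_; ++i) grown[i] = data_[i];
        data_ = grown;
        capacity_ *= 2;
      }
      data_[length_++] = value;
    }
    int length() const { return length_; }
    const T& at(int i) const { return data_[i]; }
   private:
    T* data_;
    int capacity_;
    int length_;
  };

  int main() {
    Zone zone;
    ZoneList<int> elements(4, &zone);
    for (int i = 0; i < 10; ++i) elements.Add(i, &zone);
    assert(elements.length() == 10 && elements.at(9) == 9);
    return 0;
  }
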
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index f88511f..cd51db8 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -820,24 +820,24 @@
// the event that code generation is requested for an identical trace.
-void RegExpTree::AppendToText(RegExpText* text) {
+void RegExpTree::AppendToText(RegExpText* text, Zone* zone) {
UNREACHABLE();
}
-void RegExpAtom::AppendToText(RegExpText* text) {
- text->AddElement(TextElement::Atom(this));
+void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
+ text->AddElement(TextElement::Atom(this), zone);
}
-void RegExpCharacterClass::AppendToText(RegExpText* text) {
- text->AddElement(TextElement::CharClass(this));
+void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
+ text->AddElement(TextElement::CharClass(this), zone);
}
-void RegExpText::AppendToText(RegExpText* text) {
+void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
for (int i = 0; i < elements()->length(); i++)
- text->AddElement(elements()->at(i));
+ text->AddElement(elements()->at(i), zone);
}
@@ -868,8 +868,8 @@
DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
if (table_ == NULL) {
- table_ = new DispatchTable();
- DispatchTableConstructor cons(table_, ignore_case);
+ table_ = new(zone()) DispatchTable(zone());
+ DispatchTableConstructor cons(table_, ignore_case, zone());
cons.BuildTable(this);
}
return table_;
@@ -966,7 +966,7 @@
current_expansion_factor_ = value;
}
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
static const int kNoRegister = -1;
@@ -1014,7 +1014,7 @@
current_expansion_factor_(1),
frequency_collator_(),
zone_(zone) {
- accept_ = new EndNode(EndNode::ACCEPT, zone);
+ accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
}
@@ -1110,7 +1110,8 @@
}
-int Trace::FindAffectedRegisters(OutSet* affected_registers) {
+int Trace::FindAffectedRegisters(OutSet* affected_registers,
+ Zone* zone) {
int max_register = RegExpCompiler::kNoRegister;
for (DeferredAction* action = actions_;
action != NULL;
@@ -1118,10 +1119,10 @@
if (action->type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(action)->range();
for (int i = range.from(); i <= range.to(); i++)
- affected_registers->Set(i);
+ affected_registers->Set(i, zone);
if (range.to() > max_register) max_register = range.to();
} else {
- affected_registers->Set(action->reg());
+ affected_registers->Set(action->reg(), zone);
if (action->reg() > max_register) max_register = action->reg();
}
}
@@ -1150,7 +1151,8 @@
int max_register,
OutSet& affected_registers,
OutSet* registers_to_pop,
- OutSet* registers_to_clear) {
+ OutSet* registers_to_clear,
+ Zone* zone) {
// The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
@@ -1256,9 +1258,9 @@
}
assembler->PushRegister(reg, stack_check);
- registers_to_pop->Set(reg);
+ registers_to_pop->Set(reg, zone);
} else if (undo_action == CLEAR) {
- registers_to_clear->Set(reg);
+ registers_to_clear->Set(reg, zone);
}
// Perform the chronologically last action (or accumulated increment)
// for the register.
@@ -1304,14 +1306,16 @@
assembler->PushCurrentPosition();
}
- int max_register = FindAffectedRegisters(&affected_registers);
+ int max_register = FindAffectedRegisters(&affected_registers,
+ compiler->zone());
OutSet registers_to_pop;
OutSet registers_to_clear;
PerformDeferredActions(assembler,
max_register,
affected_registers,
&registers_to_pop,
- &registers_to_clear);
+ &registers_to_clear,
+ compiler->zone());
if (cp_offset_ != 0) {
assembler->AdvanceCurrentPosition(cp_offset_);
}
@@ -1388,17 +1392,18 @@
}
-void GuardedAlternative::AddGuard(Guard* guard) {
+void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
if (guards_ == NULL)
- guards_ = new ZoneList<Guard*>(1);
- guards_->Add(guard);
+ guards_ = new(zone) ZoneList<Guard*>(1, zone);
+ guards_->Add(guard, zone);
}
ActionNode* ActionNode::SetRegister(int reg,
int val,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(SET_REGISTER, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(SET_REGISTER, on_success);
result->data_.u_store_register.reg = reg;
result->data_.u_store_register.value = val;
return result;
@@ -1406,7 +1411,8 @@
ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
- ActionNode* result = new ActionNode(INCREMENT_REGISTER, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success);
result->data_.u_increment_register.reg = reg;
return result;
}
@@ -1415,7 +1421,8 @@
ActionNode* ActionNode::StorePosition(int reg,
bool is_capture,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(STORE_POSITION, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(STORE_POSITION, on_success);
result->data_.u_position_register.reg = reg;
result->data_.u_position_register.is_capture = is_capture;
return result;
@@ -1424,7 +1431,8 @@
ActionNode* ActionNode::ClearCaptures(Interval range,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(CLEAR_CAPTURES, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success);
result->data_.u_clear_captures.range_from = range.from();
result->data_.u_clear_captures.range_to = range.to();
return result;
@@ -1434,7 +1442,8 @@
ActionNode* ActionNode::BeginSubmatch(int stack_reg,
int position_reg,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(BEGIN_SUBMATCH, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success);
result->data_.u_submatch.stack_pointer_register = stack_reg;
result->data_.u_submatch.current_position_register = position_reg;
return result;
@@ -1446,7 +1455,8 @@
int clear_register_count,
int clear_register_from,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
result->data_.u_submatch.stack_pointer_register = stack_reg;
result->data_.u_submatch.current_position_register = position_reg;
result->data_.u_submatch.clear_register_count = clear_register_count;
@@ -1459,7 +1469,8 @@
int repetition_register,
int repetition_limit,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(EMPTY_MATCH_CHECK, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success);
result->data_.u_empty_match_check.start_register = start_register;
result->data_.u_empty_match_check.repetition_register = repetition_register;
result->data_.u_empty_match_check.repetition_limit = repetition_limit;
@@ -2039,8 +2050,9 @@
Label* on_failure,
int cp_offset,
bool check_offset,
- bool preloaded) {
- ZoneList<CharacterRange>* ranges = cc->ranges();
+ bool preloaded,
+ Zone* zone) {
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone);
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
@@ -2099,7 +2111,7 @@
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
}
- if (cc->is_standard() &&
+ if (cc->is_standard(zone) &&
macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
on_failure)) {
return;
@@ -2112,7 +2124,8 @@
// entry at zero which goes to the failure label, but if there
// was already one there we fall through for success on that entry.
// Subsequent entries have alternating meaning (success/failure).
- ZoneList<int>* range_boundaries = new ZoneList<int>(last_valid_range);
+ ZoneList<int>* range_boundaries =
+ new(zone) ZoneList<int>(last_valid_range, zone);
bool zeroth_entry_is_failure = !cc->is_negated();
@@ -2122,9 +2135,9 @@
ASSERT_EQ(i, 0);
zeroth_entry_is_failure = !zeroth_entry_is_failure;
} else {
- range_boundaries->Add(range.from());
+ range_boundaries->Add(range.from(), zone);
}
- range_boundaries->Add(range.to() + 1);
+ range_boundaries->Add(range.to() + 1, zone);
}
int end_index = range_boundaries->length() - 1;
if (range_boundaries->at(end_index) > max_char) {
@@ -2522,7 +2535,7 @@
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges();
+ ZoneList<CharacterRange>* ranges = tree->ranges(zone());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
// useful way to incorporate a negative char class into this scheme
@@ -2707,7 +2720,7 @@
} else {
ASSERT(elm.type == TextElement::CHAR_CLASS);
RegExpCharacterClass* cc = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = cc->ranges();
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
@@ -2754,6 +2767,15 @@
if (info()->visited) return this;
VisitMarker marker(info());
int choice_count = alternatives_->length();
+
+ for (int i = 0; i < choice_count; i++) {
+ GuardedAlternative alternative = alternatives_->at(i);
+ if (alternative.guards() != NULL && alternative.guards()->length() != 0) {
+ set_replacement(this);
+ return this;
+ }
+ }
+
int surviving = 0;
RegExpNode* survivor = NULL;
for (int i = 0; i < choice_count; i++) {
@@ -2775,13 +2797,13 @@
// Only some of the nodes survived the filtering. We need to rebuild the
// alternatives list.
ZoneList<GuardedAlternative>* new_alternatives =
- new ZoneList<GuardedAlternative>(surviving);
+ new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
alternatives_->at(i).node()->FilterASCII(depth - 1);
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
- new_alternatives->Add(alternatives_->at(i));
+ new_alternatives->Add(alternatives_->at(i), zone());
}
}
alternatives_ = new_alternatives;
@@ -2938,7 +2960,7 @@
EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
- new BoyerMooreLookahead(eats_at_least, compiler, zone());
+ new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
@@ -3165,7 +3187,8 @@
backtrack,
cp_offset,
*checked_up_to < cp_offset,
- preloaded);
+ preloaded,
+ zone());
UpdateBoundsCheck(cp_offset, checked_up_to);
}
}
@@ -3286,11 +3309,11 @@
RegExpCharacterClass* cc = elm.data.u_char_class;
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
- if (cc->is_standard()) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges();
+ if (cc->is_standard(zone())) continue;
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
int range_count = ranges->length();
for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(ranges, is_ascii);
+ ranges->at(j).AddCaseEquivalents(ranges, is_ascii, zone());
}
}
}
@@ -3313,7 +3336,7 @@
TextElement elm = elms_->at(0);
if (elm.type != TextElement::CHAR_CLASS) return NULL;
RegExpCharacterClass* node = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = node->ranges();
+ ZoneList<CharacterRange>* ranges = node->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
@@ -3435,13 +3458,13 @@
// size then it is on the stack, otherwise the excess is on the heap.
class AlternativeGenerationList {
public:
- explicit AlternativeGenerationList(int count)
- : alt_gens_(count) {
+ AlternativeGenerationList(int count, Zone* zone)
+ : alt_gens_(count, zone) {
for (int i = 0; i < count && i < kAFew; i++) {
- alt_gens_.Add(a_few_alt_gens_ + i);
+ alt_gens_.Add(a_few_alt_gens_ + i, zone);
}
for (int i = kAFew; i < count; i++) {
- alt_gens_.Add(new AlternativeGeneration());
+ alt_gens_.Add(new AlternativeGeneration(), zone);
}
}
~AlternativeGenerationList() {
@@ -3527,9 +3550,9 @@
} else {
max_char_ = String::kMaxUtf16CodeUnit;
}
- bitmaps_ = new ZoneList<BoyerMoorePositionInfo*>(length);
+ bitmaps_ = new(zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
for (int i = 0; i < length; i++) {
- bitmaps_->Add(new BoyerMoorePositionInfo(zone));
+ bitmaps_->Add(new(zone) BoyerMoorePositionInfo(zone), zone);
}
}
@@ -3875,7 +3898,9 @@
EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
- new BoyerMooreLookahead(eats_at_least, compiler, zone());
+ new(zone()) BoyerMooreLookahead(eats_at_least,
+ compiler,
+ zone());
GuardedAlternative alt0 = alternatives_->at(0);
alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
@@ -3897,7 +3922,7 @@
(current_trace->characters_preloaded() == preload_characters);
bool preload_has_checked_bounds = preload_is_current;
- AlternativeGenerationList alt_gens(choice_count);
+ AlternativeGenerationList alt_gens(choice_count, zone());
// For now we just call all choices one after the other. The idea ultimately
// is to use the Dispatch table to try only the relevant ones.
@@ -4377,6 +4402,7 @@
void DotPrinter::VisitText(TextNode* that) {
+ Zone* zone = that->zone();
stream()->Add(" n%p [label=\"", that);
for (int i = 0; i < that->elements()->length(); i++) {
if (i > 0) stream()->Add(" ");
@@ -4391,8 +4417,8 @@
stream()->Add("[");
if (node->is_negated())
stream()->Add("^");
- for (int j = 0; j < node->ranges()->length(); j++) {
- CharacterRange range = node->ranges()->at(j);
+ for (int j = 0; j < node->ranges(zone)->length(); j++) {
+ CharacterRange range = node->ranges(zone)->at(j);
stream()->Add("%k-%k", range.from(), range.to());
}
stream()->Add("]");
@@ -4550,15 +4576,16 @@
RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- ZoneList<TextElement>* elms = new ZoneList<TextElement>(1);
- elms->Add(TextElement::Atom(this));
- return new TextNode(elms, on_success);
+ ZoneList<TextElement>* elms =
+ new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
+ elms->Add(TextElement::Atom(this), compiler->zone());
+ return new(compiler->zone()) TextNode(elms, on_success);
}
RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new TextNode(elements(), on_success);
+ return new(compiler->zone()) TextNode(elements(), on_success);
}
@@ -4612,7 +4639,7 @@
}
-bool RegExpCharacterClass::is_standard() {
+bool RegExpCharacterClass::is_standard(Zone* zone) {
// TODO(lrn): Remove need for this function, by not throwing away information
// along the way.
if (is_negated_) {
@@ -4621,31 +4648,31 @@
if (set_.is_standard()) {
return true;
}
- if (CompareRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
+ if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
set_.set_standard_set_type('s');
return true;
}
- if (CompareInverseRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
+ if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
set_.set_standard_set_type('S');
return true;
}
- if (CompareInverseRanges(set_.ranges(),
+ if (CompareInverseRanges(set_.ranges(zone),
kLineTerminatorRanges,
kLineTerminatorRangeCount)) {
set_.set_standard_set_type('.');
return true;
}
- if (CompareRanges(set_.ranges(),
+ if (CompareRanges(set_.ranges(zone),
kLineTerminatorRanges,
kLineTerminatorRangeCount)) {
set_.set_standard_set_type('n');
return true;
}
- if (CompareRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
set_.set_standard_set_type('w');
return true;
}
- if (CompareInverseRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
set_.set_standard_set_type('W');
return true;
}
@@ -4655,7 +4682,7 @@
RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new TextNode(this, on_success);
+ return new(compiler->zone()) TextNode(this, on_success);
}
@@ -4663,7 +4690,8 @@
RegExpNode* on_success) {
ZoneList<RegExpTree*>* alternatives = this->alternatives();
int length = alternatives->length();
- ChoiceNode* result = new ChoiceNode(length, compiler->zone());
+ ChoiceNode* result =
+ new(compiler->zone()) ChoiceNode(length, compiler->zone());
for (int i = 0; i < length; i++) {
GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
on_success));
@@ -4756,6 +4784,8 @@
int body_start_reg = RegExpCompiler::kNoRegister;
Interval capture_registers = body->CaptureRegisters();
bool needs_capture_clearing = !capture_registers.is_empty();
+ Zone* zone = compiler->zone();
+
if (body_can_be_empty) {
body_start_reg = compiler->AllocateRegister();
} else if (FLAG_regexp_optimization && !needs_capture_clearing) {
@@ -4786,7 +4816,7 @@
// Unroll the optional matches up to max.
RegExpNode* answer = on_success;
for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new ChoiceNode(2, compiler->zone());
+ ChoiceNode* alternation = new(zone) ChoiceNode(2, zone);
if (is_greedy) {
alternation->AddAlternative(
GuardedAlternative(body->ToNode(compiler, answer)));
@@ -4809,8 +4839,8 @@
int reg_ctr = needs_counter
? compiler->AllocateRegister()
: RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new LoopChoiceNode(body->min_match() == 0,
- compiler->zone());
+ LoopChoiceNode* center = new(zone) LoopChoiceNode(body->min_match() == 0,
+ zone);
if (not_at_start) center->set_not_at_start();
RegExpNode* loop_return = needs_counter
? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
@@ -4835,13 +4865,14 @@
}
GuardedAlternative body_alt(body_node);
if (has_max) {
- Guard* body_guard = new Guard(reg_ctr, Guard::LT, max);
- body_alt.AddGuard(body_guard);
+ Guard* body_guard =
+ new(zone) Guard(reg_ctr, Guard::LT, max);
+ body_alt.AddGuard(body_guard, zone);
}
GuardedAlternative rest_alt(on_success);
if (has_min) {
- Guard* rest_guard = new Guard(reg_ctr, Guard::GEQ, min);
- rest_alt.AddGuard(rest_guard);
+ Guard* rest_guard = new(compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
+ rest_alt.AddGuard(rest_guard, zone);
}
if (is_greedy) {
center->AddLoopAlternative(body_alt);
@@ -4861,6 +4892,8 @@
RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
NodeInfo info;
+ Zone* zone = compiler->zone();
+
switch (type()) {
case START_OF_LINE:
return AssertionNode::AfterNewline(on_success);
@@ -4879,13 +4912,13 @@
int stack_pointer_register = compiler->AllocateRegister();
int position_register = compiler->AllocateRegister();
// The ChoiceNode to distinguish between a newline and end-of-input.
- ChoiceNode* result = new ChoiceNode(2, compiler->zone());
+ ChoiceNode* result = new(zone) ChoiceNode(2, zone);
// Create a newline atom.
ZoneList<CharacterRange>* newline_ranges =
- new ZoneList<CharacterRange>(3);
- CharacterRange::AddClassEscape('n', newline_ranges);
- RegExpCharacterClass* newline_atom = new RegExpCharacterClass('n');
- TextNode* newline_matcher = new TextNode(
+ new(zone) ZoneList<CharacterRange>(3, zone);
+ CharacterRange::AddClassEscape('n', newline_ranges, zone);
+ RegExpCharacterClass* newline_atom = new(zone) RegExpCharacterClass('n');
+ TextNode* newline_matcher = new(zone) TextNode(
newline_atom,
ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
position_register,
@@ -4913,9 +4946,10 @@
RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()),
- on_success);
+ return new(compiler->zone())
+ BackReferenceNode(RegExpCapture::StartRegister(index()),
+ RegExpCapture::EndRegister(index()),
+ on_success);
}
@@ -4960,18 +4994,20 @@
// for a negative lookahead. The NegativeLookaheadChoiceNode is a special
// ChoiceNode that knows to ignore the first exit when calculating quick
// checks.
+ Zone* zone = compiler->zone();
+
GuardedAlternative body_alt(
body()->ToNode(
compiler,
- success = new NegativeSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- compiler->zone())));
+ success = new(zone) NegativeSubmatchSuccess(stack_pointer_register,
+ position_register,
+ register_count,
+ register_start,
+ zone)));
ChoiceNode* choice_node =
- new NegativeLookaheadChoiceNode(body_alt,
- GuardedAlternative(on_success),
- compiler->zone());
+ new(zone) NegativeLookaheadChoiceNode(body_alt,
+ GuardedAlternative(on_success),
+ zone);
return ActionNode::BeginSubmatch(stack_pointer_register,
position_register,
choice_node);
@@ -5010,19 +5046,21 @@
static void AddClass(const int* elmv,
int elmc,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
elmc--;
ASSERT(elmv[elmc] == 0x10000);
for (int i = 0; i < elmc; i += 2) {
ASSERT(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1));
+ ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone);
}
}
static void AddClassNegated(const int *elmv,
int elmc,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
elmc--;
ASSERT(elmv[elmc] == 0x10000);
ASSERT(elmv[0] != 0x0000);
@@ -5031,51 +5069,54 @@
for (int i = 0; i < elmc; i += 2) {
ASSERT(last <= elmv[i] - 1);
ASSERT(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(last, elmv[i] - 1));
+ ranges->Add(CharacterRange(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
+ ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit), zone);
}
void CharacterRange::AddClassEscape(uc16 type,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
switch (type) {
case 's':
- AddClass(kSpaceRanges, kSpaceRangeCount, ranges);
+ AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
break;
case 'S':
- AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges);
+ AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
break;
case 'w':
- AddClass(kWordRanges, kWordRangeCount, ranges);
+ AddClass(kWordRanges, kWordRangeCount, ranges, zone);
break;
case 'W':
- AddClassNegated(kWordRanges, kWordRangeCount, ranges);
+ AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
break;
case 'd':
- AddClass(kDigitRanges, kDigitRangeCount, ranges);
+ AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
break;
case 'D':
- AddClassNegated(kDigitRanges, kDigitRangeCount, ranges);
+ AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
break;
case '.':
AddClassNegated(kLineTerminatorRanges,
kLineTerminatorRangeCount,
- ranges);
+ ranges,
+ zone);
break;
// This is not a character range as defined by the spec but a
// convenient shorthand for a character class that matches any
// character.
case '*':
- ranges->Add(CharacterRange::Everything());
+ ranges->Add(CharacterRange::Everything(), zone);
break;
// This is the set of characters matched by the $ and ^ symbols
// in multiline mode.
case 'n':
AddClass(kLineTerminatorRanges,
kLineTerminatorRangeCount,
- ranges);
+ ranges,
+ zone);
break;
default:
UNREACHABLE();
@@ -5091,9 +5132,11 @@
class CharacterRangeSplitter {
public:
CharacterRangeSplitter(ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded)
+ ZoneList<CharacterRange>** excluded,
+ Zone* zone)
: included_(included),
- excluded_(excluded) { }
+ excluded_(excluded),
+ zone_(zone) { }
void Call(uc16 from, DispatchTable::Entry entry);
static const int kInBase = 0;
@@ -5102,6 +5145,7 @@
private:
ZoneList<CharacterRange>** included_;
ZoneList<CharacterRange>** excluded_;
+ Zone* zone_;
};
@@ -5110,31 +5154,33 @@
ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
? included_
: excluded_;
- if (*target == NULL) *target = new ZoneList<CharacterRange>(2);
- (*target)->Add(CharacterRange(entry.from(), entry.to()));
+ if (*target == NULL) *target = new(zone_) ZoneList<CharacterRange>(2, zone_);
+ (*target)->Add(CharacterRange(entry.from(), entry.to()), zone_);
}
void CharacterRange::Split(ZoneList<CharacterRange>* base,
Vector<const int> overlay,
ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded) {
+ ZoneList<CharacterRange>** excluded,
+ Zone* zone) {
ASSERT_EQ(NULL, *included);
ASSERT_EQ(NULL, *excluded);
- DispatchTable table;
+ DispatchTable table(zone);
for (int i = 0; i < base->length(); i++)
- table.AddRange(base->at(i), CharacterRangeSplitter::kInBase);
+ table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
for (int i = 0; i < overlay.length(); i += 2) {
table.AddRange(CharacterRange(overlay[i], overlay[i + 1] - 1),
- CharacterRangeSplitter::kInOverlay);
+ CharacterRangeSplitter::kInOverlay, zone);
}
- CharacterRangeSplitter callback(included, excluded);
+ CharacterRangeSplitter callback(included, excluded, zone);
table.ForEach(&callback);
}
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
- bool is_ascii) {
+ bool is_ascii,
+ Zone* zone) {
Isolate* isolate = Isolate::Current();
uc16 bottom = from();
uc16 top = to();
@@ -5149,7 +5195,7 @@
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]));
+ ranges->Add(CharacterRange::Singleton(chars[i]), zone);
}
}
} else {
@@ -5189,7 +5235,7 @@
uc16 range_from = c - (block_end - pos);
uc16 range_to = c - (block_end - end);
if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange(range_from, range_to));
+ ranges->Add(CharacterRange(range_from, range_to), zone);
}
}
pos = end + 1;
@@ -5212,10 +5258,10 @@
}
-ZoneList<CharacterRange>* CharacterSet::ranges() {
+ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
if (ranges_ == NULL) {
- ranges_ = new ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape(standard_set_type_, ranges_);
+ ranges_ = new(zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::AddClassEscape(standard_set_type_, ranges_, zone);
}
return ranges_;
}
@@ -5344,7 +5390,8 @@
void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
- ZoneList<CharacterRange>* negated_ranges) {
+ ZoneList<CharacterRange>* negated_ranges,
+ Zone* zone) {
ASSERT(CharacterRange::IsCanonical(ranges));
ASSERT_EQ(0, negated_ranges->length());
int range_count = ranges->length();
@@ -5356,12 +5403,13 @@
}
while (i < range_count) {
CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange(from + 1, range.from() - 1));
+ negated_ranges->Add(CharacterRange(from + 1, range.from() - 1), zone);
from = range.to();
i++;
}
if (from < String::kMaxUtf16CodeUnit) {
- negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit));
+ negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit),
+ zone);
}
}
@@ -5370,33 +5418,33 @@
// Splay tree
-OutSet* OutSet::Extend(unsigned value) {
+OutSet* OutSet::Extend(unsigned value, Zone* zone) {
if (Get(value))
return this;
- if (successors() != NULL) {
- for (int i = 0; i < successors()->length(); i++) {
- OutSet* successor = successors()->at(i);
+ if (successors(zone) != NULL) {
+ for (int i = 0; i < successors(zone)->length(); i++) {
+ OutSet* successor = successors(zone)->at(i);
if (successor->Get(value))
return successor;
}
} else {
- successors_ = new ZoneList<OutSet*>(2);
+ successors_ = new(zone) ZoneList<OutSet*>(2, zone);
}
- OutSet* result = new OutSet(first_, remaining_);
- result->Set(value);
- successors()->Add(result);
+ OutSet* result = new(zone) OutSet(first_, remaining_);
+ result->Set(value, zone);
+ successors(zone)->Add(result, zone);
return result;
}
-void OutSet::Set(unsigned value) {
+void OutSet::Set(unsigned value, Zone* zone) {
if (value < kFirstLimit) {
first_ |= (1 << value);
} else {
if (remaining_ == NULL)
- remaining_ = new ZoneList<unsigned>(1);
+ remaining_ = new(zone) ZoneList<unsigned>(1, zone);
if (remaining_->is_empty() || !remaining_->Contains(value))
- remaining_->Add(value);
+ remaining_->Add(value, zone);
}
}
@@ -5415,13 +5463,15 @@
const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-void DispatchTable::AddRange(CharacterRange full_range, int value) {
+void DispatchTable::AddRange(CharacterRange full_range, int value,
+ Zone* zone) {
CharacterRange current = full_range;
if (tree()->is_empty()) {
// If this is the first range we just insert into the table.
ZoneSplayTree<Config>::Locator loc;
ASSERT_RESULT(tree()->Insert(current.from(), &loc));
- loc.set_value(Entry(current.from(), current.to(), empty()->Extend(value)));
+ loc.set_value(Entry(current.from(), current.to(),
+ empty()->Extend(value, zone)));
return;
}
// First see if there is a range to the left of this one that
@@ -5464,7 +5514,7 @@
ASSERT_RESULT(tree()->Insert(current.from(), &ins));
ins.set_value(Entry(current.from(),
entry->from() - 1,
- empty()->Extend(value)));
+ empty()->Extend(value, zone)));
current.set_from(entry->from());
}
ASSERT_EQ(current.from(), entry->from());
@@ -5482,7 +5532,7 @@
// The overlapping range is now completely contained by the range
// we're adding so we can just update it and move the start point
// of the range we're adding just past it.
- entry->AddValue(value);
+ entry->AddValue(value, zone);
// Bail out if the last interval ended at 0xFFFF since otherwise
// adding 1 will wrap around to 0.
if (entry->to() == String::kMaxUtf16CodeUnit)
@@ -5495,7 +5545,7 @@
ASSERT_RESULT(tree()->Insert(current.from(), &ins));
ins.set_value(Entry(current.from(),
current.to(),
- empty()->Extend(value)));
+ empty()->Extend(value, zone)));
break;
}
}
@@ -5695,7 +5745,7 @@
} else {
ASSERT(text.type == TextElement::CHAR_CLASS);
RegExpCharacterClass* char_class = text.data.u_char_class;
- ZoneList<CharacterRange>* ranges = char_class->ranges();
+ ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
if (char_class->is_negated()) {
bm->SetAll(offset);
} else {
@@ -5815,7 +5865,7 @@
}
case TextElement::CHAR_CLASS: {
RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges();
+ ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
if (tree->is_negated()) {
AddInverse(ranges);
} else {
@@ -5879,7 +5929,7 @@
RegExpQuantifier::ToNode(0,
RegExpTree::kInfinity,
false,
- new RegExpCharacterClass('*'),
+ new(zone) RegExpCharacterClass('*'),
&compiler,
captured_body,
data->contains_anchor);
@@ -5887,10 +5937,10 @@
if (data->contains_anchor) {
// Unroll loop once, to take care of the case that might start
// at the start of input.
- ChoiceNode* first_step_node = new ChoiceNode(2, zone);
+ ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
first_step_node->AddAlternative(GuardedAlternative(captured_body));
first_step_node->AddAlternative(GuardedAlternative(
- new TextNode(new RegExpCharacterClass('*'), loop_node)));
+ new(zone) TextNode(new(zone) RegExpCharacterClass('*'), loop_node)));
node = first_step_node;
} else {
node = loop_node;
@@ -5903,7 +5953,7 @@
if (node != NULL) node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
}
- if (node == NULL) node = new EndNode(EndNode::BACKTRACK, zone);
+ if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
Analysis analysis(ignore_case, is_ascii);
analysis.EnsureAnalyzed(node);
@@ -5921,13 +5971,17 @@
: NativeRegExpMacroAssembler::UC16;
#if V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_ARM
- RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
- RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#endif
#else // V8_INTERPRETED_REGEXP
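
Every hunk above follows the same recipe: allocations that previously went to the C++ heap now go through zone placement new, and the Zone is threaded explicitly through the signatures that need it. The following is a minimal, self-contained sketch of that recipe, not the real v8 Zone (which grows in segments rather than using a fixed buffer); Zone, ZoneObject and EndNode here are simplified stand-ins.

#include <cstddef>
#include <cstdlib>

// Bump-pointer arena: objects are never freed individually; the whole
// buffer is released when the Zone is destroyed. No overflow check in
// this sketch.
class Zone {
 public:
  explicit Zone(size_t capacity)
      : begin_(static_cast<char*>(malloc(capacity))), pos_(begin_) {}
  ~Zone() { free(begin_); }
  void* New(size_t size) {
    void* result = pos_;
    pos_ += (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
    return result;
  }
 private:
  char* begin_;
  char* pos_;
};

// Gives subclasses the 'new(zone) T(...)' spelling used throughout the diff.
class ZoneObject {
 public:
  void* operator new(size_t size, Zone* zone) { return zone->New(size); }
};

class EndNode : public ZoneObject {
 public:
  enum Action { ACCEPT, BACKTRACK };
  EndNode(Action action, Zone* zone) : action_(action), zone_(zone) {}
  Zone* zone() const { return zone_; }  // same accessor pattern as the diff
 private:
  Action action_;
  Zone* zone_;
};

int main() {
  Zone zone(1 << 16);
  EndNode* accept = new(&zone) EndNode(EndNode::ACCEPT, &zone);
  (void)accept;  // reclaimed wholesale when 'zone' goes out of scope
  return 0;
}

The Zone* zone() const accessors added to RegExpCompiler and RegExpNode (and to LAllocator and LEnvironment further down) serve the same purpose: private helpers can allocate without every parameter list growing another Zone* argument.
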
diff --git a/src/jsregexp.h b/src/jsregexp.h
index fa4ef5d..782c5b0 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -245,7 +245,8 @@
// For compatibility with the CHECK_OK macro
CharacterRange(void* null) { ASSERT_EQ(NULL, null); } //NOLINT
CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
- static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges);
+ static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
+ Zone* zone);
static Vector<const int> GetWordBounds();
static inline CharacterRange Singleton(uc16 value) {
return CharacterRange(value, value);
@@ -265,11 +266,13 @@
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
+ void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii,
+ Zone* zone);
static void Split(ZoneList<CharacterRange>* base,
Vector<const int> overlay,
ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded);
+ ZoneList<CharacterRange>** excluded,
+ Zone* zone);
// Whether a range list is in canonical form: Ranges ordered by from value,
// and ranges non-overlapping and non-adjacent.
static bool IsCanonical(ZoneList<CharacterRange>* ranges);
@@ -280,7 +283,8 @@
static void Canonicalize(ZoneList<CharacterRange>* ranges);
// Negate the contents of a character range in canonical form.
static void Negate(ZoneList<CharacterRange>* src,
- ZoneList<CharacterRange>* dst);
+ ZoneList<CharacterRange>* dst,
+ Zone* zone);
static const int kStartMarker = (1 << 24);
static const int kPayloadMask = (1 << 24) - 1;
@@ -295,7 +299,7 @@
class OutSet: public ZoneObject {
public:
OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
- OutSet* Extend(unsigned value);
+ OutSet* Extend(unsigned value, Zone* zone);
bool Get(unsigned value);
static const unsigned kFirstLimit = 32;
@@ -303,12 +307,12 @@
// Destructively set a value in this set. In most cases you want
// to use Extend instead to ensure that only one instance exists
// that contains the same values.
- void Set(unsigned value);
+ void Set(unsigned value, Zone* zone);
// The successors are a list of sets that contain the same values
// as this set and the one more value that is not present in this
// set.
- ZoneList<OutSet*>* successors() { return successors_; }
+ ZoneList<OutSet*>* successors(Zone* zone) { return successors_; }
OutSet(uint32_t first, ZoneList<unsigned>* remaining)
: first_(first), remaining_(remaining), successors_(NULL) { }
@@ -323,6 +327,8 @@
// Used for mapping character ranges to choices.
class DispatchTable : public ZoneObject {
public:
+ explicit DispatchTable(Zone* zone) : tree_(zone) { }
+
class Entry {
public:
Entry() : from_(0), to_(0), out_set_(NULL) { }
@@ -331,7 +337,9 @@
uc16 from() { return from_; }
uc16 to() { return to_; }
void set_to(uc16 value) { to_ = value; }
- void AddValue(int value) { out_set_ = out_set_->Extend(value); }
+ void AddValue(int value, Zone* zone) {
+ out_set_ = out_set_->Extend(value, zone);
+ }
OutSet* out_set() { return out_set_; }
private:
uc16 from_;
@@ -355,12 +363,14 @@
}
};
- void AddRange(CharacterRange range, int value);
+ void AddRange(CharacterRange range, int value, Zone* zone);
OutSet* Get(uc16 value);
void Dump();
template <typename Callback>
- void ForEach(Callback* callback) { return tree()->ForEach(callback); }
+ void ForEach(Callback* callback) {
+ return tree()->ForEach(callback);
+ }
private:
// There can't be a static empty set since it allocates its
@@ -635,7 +645,7 @@
return bm_info_[not_at_start ? 1 : 0];
}
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
protected:
enum LimitResult { DONE, CONTINUE };
@@ -811,7 +821,7 @@
TextNode(RegExpCharacterClass* that,
RegExpNode* on_success)
: SeqRegExpNode(on_success),
- elms_(new ZoneList<TextElement>(1, zone())) {
+ elms_(new(zone()) ZoneList<TextElement>(1, zone())) {
elms_->Add(TextElement::CharClass(that), zone());
}
virtual void Accept(NodeVisitor* visitor);
@@ -868,19 +878,19 @@
AFTER_NEWLINE
};
static AssertionNode* AtEnd(RegExpNode* on_success) {
- return new AssertionNode(AT_END, on_success);
+ return new(on_success->zone()) AssertionNode(AT_END, on_success);
}
static AssertionNode* AtStart(RegExpNode* on_success) {
- return new AssertionNode(AT_START, on_success);
+ return new(on_success->zone()) AssertionNode(AT_START, on_success);
}
static AssertionNode* AtBoundary(RegExpNode* on_success) {
- return new AssertionNode(AT_BOUNDARY, on_success);
+ return new(on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
}
static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
- return new AssertionNode(AT_NON_BOUNDARY, on_success);
+ return new(on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
}
static AssertionNode* AfterNewline(RegExpNode* on_success) {
- return new AssertionNode(AFTER_NEWLINE, on_success);
+ return new(on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -1018,7 +1028,7 @@
class GuardedAlternative {
public:
explicit GuardedAlternative(RegExpNode* node) : node_(node), guards_(NULL) { }
- void AddGuard(Guard* guard);
+ void AddGuard(Guard* guard, Zone* zone);
RegExpNode* node() { return node_; }
void set_node(RegExpNode* node) { node_ = node; }
ZoneList<Guard*>* guards() { return guards_; }
@@ -1036,7 +1046,8 @@
public:
explicit ChoiceNode(int expected_size, Zone* zone)
: RegExpNode(zone),
- alternatives_(new ZoneList<GuardedAlternative>(expected_size, zone)),
+ alternatives_(new(zone)
+ ZoneList<GuardedAlternative>(expected_size, zone)),
table_(NULL),
not_at_start_(false),
being_calculated_(false) { }
@@ -1220,7 +1231,7 @@
class BoyerMoorePositionInfo : public ZoneObject {
public:
explicit BoyerMoorePositionInfo(Zone* zone)
- : map_(new ZoneList<bool>(kMapSize, zone)),
+ : map_(new(zone) ZoneList<bool>(kMapSize, zone)),
map_count_(0),
w_(kNotYet),
s_(kNotYet),
@@ -1458,12 +1469,13 @@
void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
private:
- int FindAffectedRegisters(OutSet* affected_registers);
+ int FindAffectedRegisters(OutSet* affected_registers, Zone* zone);
void PerformDeferredActions(RegExpMacroAssembler* macro,
- int max_register,
- OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear);
+ int max_register,
+ OutSet& affected_registers,
+ OutSet* registers_to_pop,
+ OutSet* registers_to_clear,
+ Zone* zone);
void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
int max_register,
OutSet& registers_to_pop,
@@ -1496,15 +1508,17 @@
// dispatch table of a choice node.
class DispatchTableConstructor: public NodeVisitor {
public:
- DispatchTableConstructor(DispatchTable* table, bool ignore_case)
+ DispatchTableConstructor(DispatchTable* table, bool ignore_case,
+ Zone* zone)
: table_(table),
choice_index_(-1),
- ignore_case_(ignore_case) { }
+ ignore_case_(ignore_case),
+ zone_(zone) { }
void BuildTable(ChoiceNode* node);
void AddRange(CharacterRange range) {
- table()->AddRange(range, choice_index_);
+ table()->AddRange(range, choice_index_, zone_);
}
void AddInverse(ZoneList<CharacterRange>* ranges);
@@ -1521,6 +1535,7 @@
DispatchTable* table_;
int choice_index_;
bool ignore_case_;
+ Zone* zone_;
};
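
OutSet, whose Set and Extend methods gained a Zone parameter above, is a small-integer set: members below kFirstLimit live in an inline 32-bit mask, and anything larger spills into a zone-allocated overflow list. The corresponding lookup, which this patch leaves zone-free because it never allocates, looks roughly like this (a sketch, assuming the fields shown above):

bool OutSet::Get(unsigned value) {
  if (value < kFirstLimit) {
    return (first_ & (1 << value)) != 0;  // inline bitmask hit
  } else if (remaining_ == NULL) {
    return false;                         // no overflow list was ever needed
  } else {
    return remaining_->Contains(value);   // linear scan of the spill list
  }
}

Extend, not Set, is the usual entry point: it memoizes results in successors_ so that structurally equal OutSets are shared, which is why the successor list is now zone-allocated as well.
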
diff --git a/src/list-inl.h b/src/list-inl.h
index 3eff582..60a033d 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -147,7 +147,11 @@
template<typename T, class P>
void List<T, P>::Clear() {
DeleteData(data_);
- Initialize(0);
+ // We don't call Initialize(0) since that requires passing a Zone,
+ // which we don't really need.
+ data_ = NULL;
+ capacity_ = 0;
+ length_ = 0;
}
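
The manual field reset in Clear() exists because Initialize now obtains its backing store through a zone-aware allocation path, so even Initialize(0) would force every caller of Clear() to supply a Zone it has no use for. A hypothetical sketch of the shape being sidestepped (the real signature threads an allocation policy rather than a bare Zone*):

template<typename T, class P>
void List<T, P>::Initialize(int capacity, Zone* zone) {
  // Hypothetical: allocate the backing store from the zone up front.
  data_ = (capacity > 0) ? NewData(capacity, zone) : NULL;
  capacity_ = capacity;
  length_ = 0;
}
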
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 9534f9e..bbc405b 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -230,9 +230,9 @@
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register());
+ op = LDoubleRegister::Create(assigned_register(), zone);
} else {
- op = LRegister::Create(assigned_register());
+ op = LRegister::Create(assigned_register(), zone);
}
} else if (IsSpilled()) {
ASSERT(!HasRegisterAssigned());
@@ -533,14 +533,14 @@
LAllocator::LAllocator(int num_values, HGraph* graph)
: zone_(graph->zone()),
chunk_(NULL),
- live_in_sets_(graph->blocks()->length()),
- live_ranges_(num_values * 2),
+ live_in_sets_(graph->blocks()->length(), zone_),
+ live_ranges_(num_values * 2, zone_),
fixed_live_ranges_(NULL),
fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2),
- active_live_ranges_(8),
- inactive_live_ranges_(8),
- reusable_slots_(8),
+ unhandled_live_ranges_(num_values * 2, zone_),
+ active_live_ranges_(8, zone_),
+ inactive_live_ranges_(8, zone_),
+ reusable_slots_(8, zone_),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
mode_(GENERAL_REGISTERS),
@@ -553,8 +553,8 @@
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
int block_count = graph_->blocks()->length();
- live_in_sets_.Initialize(block_count);
- live_in_sets_.AddBlock(NULL, block_count);
+ live_in_sets_.Initialize(block_count, zone());
+ live_in_sets_.AddBlock(NULL, block_count, zone());
}
@@ -630,7 +630,7 @@
TraceAlloc("Fixed reg is tagged at %d\n", pos);
LInstruction* instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand);
+ instr->pointer_map()->RecordPointer(operand, zone());
}
}
return operand;
@@ -665,7 +665,7 @@
LiveRange* LAllocator::LiveRangeFor(int index) {
if (index >= live_ranges_.length()) {
- live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1);
+ live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
}
LiveRange* result = live_ranges_[index];
if (result == NULL) {
@@ -746,7 +746,7 @@
LOperand* from,
LOperand* to) {
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
if (from->IsUnallocated()) {
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
@@ -755,13 +755,13 @@
if (cur_to->IsUnallocated()) {
if (LUnallocated::cast(cur_to)->virtual_register() ==
LUnallocated::cast(from)->virtual_register()) {
- move->AddMove(cur.source(), to);
+ move->AddMove(cur.source(), to, zone());
return;
}
}
}
}
- move->AddMove(from, to);
+ move->AddMove(from, to, zone());
}
@@ -800,7 +800,7 @@
LiveRange* range = LiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained();
+ LUnallocated* output_copy = first_output->CopyUnconstrained(zone());
bool is_tagged = HasTaggedValue(first_output->virtual_register());
AllocateFixed(first_output, gap_index, is_tagged);
@@ -821,8 +821,8 @@
// Thus it should be inserted to a lifetime position corresponding to
// the instruction end.
LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
- move->AddMove(first_output, range->GetSpillOperand());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, zone());
+ move->AddMove(first_output, range->GetSpillOperand(), zone());
}
}
@@ -831,7 +831,7 @@
for (UseIterator it(second); !it.Done(); it.Advance()) {
LUnallocated* cur_input = LUnallocated::cast(it.Current());
if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -840,7 +840,7 @@
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
cur_input->set_virtual_register(GetVirtualRegister());
if (!AllocationOk()) return;
@@ -864,7 +864,7 @@
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
cur_input->set_virtual_register(second_output->virtual_register());
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -872,7 +872,7 @@
int index = gap_index + 1;
LInstruction* instr = InstructionAt(index);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy);
+ instr->pointer_map()->RecordPointer(input_copy, zone());
}
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
@@ -901,7 +901,7 @@
if (IsGapAt(index)) {
// We have a gap at this position.
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
@@ -1046,17 +1046,17 @@
InstructionAt(cur_block->last_instruction_index());
if (branch->HasPointerMap()) {
if (phi->representation().IsTagged()) {
- branch->pointer_map()->RecordPointer(phi_operand);
+ branch->pointer_map()->RecordPointer(phi_operand, zone());
} else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand);
+ branch->pointer_map()->RecordUntagged(phi_operand, zone());
}
}
}
LiveRange* live_range = LiveRangeFor(phi->id());
LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START)->
- AddMove(phi_operand, live_range->GetSpillOperand());
+ label->GetOrCreateParallelMove(LGap::START, zone())->
+ AddMove(phi_operand, live_range->GetSpillOperand(), zone());
live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
}
}
@@ -1151,14 +1151,15 @@
LInstruction* branch = InstructionAt(pred->last_instruction_index());
if (branch->HasPointerMap()) {
if (HasTaggedValue(range->id())) {
- branch->pointer_map()->RecordPointer(cur_op);
+ branch->pointer_map()->RecordPointer(cur_op, zone());
} else if (!cur_op->IsDoubleStackSlot() &&
!cur_op->IsDoubleRegister()) {
branch->pointer_map()->RemovePointer(cur_op);
}
}
}
- gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
+ gap->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(pred_op, cur_op, zone());
}
}
}
@@ -1169,11 +1170,11 @@
if (IsGapAt(index)) {
LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END);
+ pos.IsInstructionStart() ? LGap::START : LGap::END, zone());
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
+ (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, zone());
}
@@ -1205,7 +1206,7 @@
LParallelMove* move = GetConnectingParallelMove(pos);
LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
- move->AddMove(prev_operand, cur_operand);
+ move->AddMove(prev_operand, cur_operand, zone());
}
}
}
@@ -1270,7 +1271,7 @@
LOperand* hint = NULL;
LOperand* phi_operand = NULL;
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
for (int j = 0; j < move->move_operands()->length(); ++j) {
LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() &&
@@ -1421,7 +1422,7 @@
safe_point >= range->spill_start_index()) {
TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand());
+ map->RecordPointer(range->GetSpillOperand(), zone());
}
if (!cur->IsSpilled()) {
@@ -1430,7 +1431,7 @@
cur->id(), cur->Start().Value(), safe_point);
LOperand* operand = cur->CreateAssignedOperand(zone_);
ASSERT(!operand->IsStackSlot());
- map->RecordPointer(operand);
+ map->RecordPointer(operand, zone());
}
}
}
@@ -1632,13 +1633,13 @@
void LAllocator::AddToActive(LiveRange* range) {
TraceAlloc("Add live range %d to active\n", range->id());
- active_live_ranges_.Add(range);
+ active_live_ranges_.Add(range, zone());
}
void LAllocator::AddToInactive(LiveRange* range) {
TraceAlloc("Add live range %d to inactive\n", range->id());
- inactive_live_ranges_.Add(range);
+ inactive_live_ranges_.Add(range, zone());
}
@@ -1649,13 +1650,13 @@
LiveRange* cur_range = unhandled_live_ranges_.at(i);
if (range->ShouldBeAllocatedBefore(cur_range)) {
TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
- unhandled_live_ranges_.InsertAt(i + 1, range);
+ unhandled_live_ranges_.InsertAt(i + 1, range, zone());
ASSERT(UnhandledIsSorted());
return;
}
}
TraceAlloc("Add live range %d to unhandled at start\n", range->id());
- unhandled_live_ranges_.InsertAt(0, range);
+ unhandled_live_ranges_.InsertAt(0, range, zone());
ASSERT(UnhandledIsSorted());
}
@@ -1664,7 +1665,7 @@
if (range == NULL || range->IsEmpty()) return;
ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
- unhandled_live_ranges_.Add(range);
+ unhandled_live_ranges_.Add(range, zone());
}
@@ -1705,7 +1706,7 @@
int index = range->TopLevel()->GetSpillOperand()->index();
if (index >= 0) {
- reusable_slots_.Add(range);
+ reusable_slots_.Add(range, zone());
}
}
@@ -1733,7 +1734,7 @@
void LAllocator::ActiveToInactive(LiveRange* range) {
ASSERT(active_live_ranges_.Contains(range));
active_live_ranges_.RemoveElement(range);
- inactive_live_ranges_.Add(range);
+ inactive_live_ranges_.Add(range, zone());
TraceAlloc("Moving live range %d from active to inactive\n", range->id());
}
@@ -1749,7 +1750,7 @@
void LAllocator::InactiveToActive(LiveRange* range) {
ASSERT(inactive_live_ranges_.Contains(range));
inactive_live_ranges_.RemoveElement(range);
- active_live_ranges_.Add(range);
+ active_live_ranges_.Add(range, zone());
TraceAlloc("Moving live range %d from inactive to active\n", range->id());
}
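
The GetOrCreateParallelMove calls throughout the allocator build gap moves lazily, and after this change the zone backing the move list must be named at each creation site. Presumably the getter now has roughly this shape (a simplified sketch, not the verbatim lithium.h code):

LParallelMove* LGap::GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
  // A gap holds four potential move lists (BEFORE, START, END, AFTER);
  // the requested one is allocated in the zone on first use.
  if (parallel_moves_[pos] == NULL) {
    parallel_moves_[pos] = new(zone) LParallelMove(zone);
  }
  return parallel_moves_[pos];
}
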
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index f5ab055..d47e335 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -457,6 +457,7 @@
LChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
int GetVirtualRegister() {
if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
diff --git a/src/lithium.cc b/src/lithium.cc
index 4ee2a7a..fd8b796 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -171,11 +171,11 @@
}
-void LPointerMap::RecordPointer(LOperand* op) {
+void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op);
+ pointer_operands_.Add(op, zone);
}
@@ -192,11 +192,11 @@
}
-void LPointerMap::RecordUntagged(LOperand* op) {
+void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- untagged_operands_.Add(op);
+ untagged_operands_.Add(op, zone);
}
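
A call site after this change names the zone on every mutation, since both operand lists inside the map now grow in it. For illustration (hypothetical position and indices, using the constructors declared in lithium.h below):

LPointerMap* map = new(zone) LPointerMap(position, zone);
map->RecordPointer(LStackSlot::Create(3, zone), zone);   // GC-visible slot
map->RecordUntagged(LRegister::Create(0, zone), zone);   // pointer-free value
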
diff --git a/src/lithium.h b/src/lithium.h
index 1bab5ac..1f42b68 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -187,8 +187,8 @@
value_ = VirtualRegisterField::update(value_, id);
}
- LUnallocated* CopyUnconstrained() {
- LUnallocated* result = new LUnallocated(ANY);
+ LUnallocated* CopyUnconstrained(Zone* zone) {
+ LUnallocated* result = new(zone) LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
}
@@ -260,10 +260,10 @@
class LConstantOperand: public LOperand {
public:
- static LConstantOperand* Create(int index) {
+ static LConstantOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LConstantOperand(index);
+ return new(zone) LConstantOperand(index);
}
static LConstantOperand* cast(LOperand* op) {
@@ -296,10 +296,10 @@
class LStackSlot: public LOperand {
public:
- static LStackSlot* Create(int index) {
+ static LStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LStackSlot(index);
+ return new(zone) LStackSlot(index);
}
static LStackSlot* cast(LOperand* op) {
@@ -321,10 +321,10 @@
class LDoubleStackSlot: public LOperand {
public:
- static LDoubleStackSlot* Create(int index) {
+ static LDoubleStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleStackSlot(index);
+ return new(zone) LDoubleStackSlot(index);
}
static LDoubleStackSlot* cast(LOperand* op) {
@@ -346,10 +346,10 @@
class LRegister: public LOperand {
public:
- static LRegister* Create(int index) {
+ static LRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LRegister(index);
+ return new(zone) LRegister(index);
}
static LRegister* cast(LOperand* op) {
@@ -371,10 +371,10 @@
class LDoubleRegister: public LOperand {
public:
- static LDoubleRegister* Create(int index) {
+ static LDoubleRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleRegister(index);
+ return new(zone) LDoubleRegister(index);
}
static LDoubleRegister* cast(LOperand* op) {
@@ -396,10 +396,10 @@
class LParallelMove : public ZoneObject {
public:
- LParallelMove() : move_operands_(4) { }
+ explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
- void AddMove(LOperand* from, LOperand* to) {
- move_operands_.Add(LMoveOperands(from, to));
+ void AddMove(LOperand* from, LOperand* to, Zone* zone) {
+ move_operands_.Add(LMoveOperands(from, to), zone);
}
bool IsRedundant() const;
@@ -417,9 +417,9 @@
class LPointerMap: public ZoneObject {
public:
- explicit LPointerMap(int position)
- : pointer_operands_(8),
- untagged_operands_(0),
+ explicit LPointerMap(int position, Zone* zone)
+ : pointer_operands_(8, zone),
+ untagged_operands_(0, zone),
position_(position),
lithium_position_(-1) { }
@@ -438,9 +438,9 @@
lithium_position_ = pos;
}
- void RecordPointer(LOperand* op);
+ void RecordPointer(LOperand* op, Zone* zone);
void RemovePointer(LOperand* op);
- void RecordUntagged(LOperand* op);
+ void RecordUntagged(LOperand* op, Zone* zone);
void PrintTo(StringStream* stream);
private:
@@ -469,7 +469,7 @@
ast_id_(ast_id),
parameter_count_(parameter_count),
pc_offset_(-1),
- values_(value_count),
+ values_(value_count, zone),
is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
spilled_registers_(NULL),
spilled_double_registers_(NULL),
@@ -492,7 +492,7 @@
LEnvironment* outer() const { return outer_; }
void AddValue(LOperand* operand, Representation representation) {
- values_.Add(operand);
+ values_.Add(operand, zone());
if (representation.IsTagged()) {
is_tagged_.Add(values_.length() - 1);
}
@@ -522,7 +522,7 @@
void PrintTo(StringStream* stream);
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
private:
Handle<JSFunction> closure_;
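
All five cached operand classes (LConstantOperand, LStackSlot, LDoubleStackSlot, LRegister, LDoubleRegister) receive the same Create change, and the split is deliberate: indices below kNumCachedOperands resolve to shared, immutable singletons in a static table, so only the rare large indices cost an allocation, and neither kind is ever freed individually. The shared shape, with the reasoning spelled out:

static LStackSlot* Create(int index, Zone* zone) {
  ASSERT(index >= 0);
  // Common case: small indices alias a preallocated, shared entry.
  if (index < kNumCachedOperands) return &cache[index];
  // Cold case: zone-allocated, reclaimed with the rest of the compilation.
  return new(zone) LStackSlot(index);
}
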
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 22b8250..e670b44 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -825,7 +825,8 @@
// Saves full information about a function: its code, its scope info
// and a SharedFunctionInfo object.
- void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
+ void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope,
+ Zone* zone) {
if (!shared->IsSharedFunctionInfo()) {
return;
}
@@ -836,14 +837,14 @@
Handle<Object>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
- Handle<Object> scope_info_list(SerializeFunctionScope(scope));
+ Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone));
info.SetOuterScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
private:
- Object* SerializeFunctionScope(Scope* scope) {
+ Object* SerializeFunctionScope(Scope* scope, Zone* zone) {
HandleScope handle_scope;
Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
@@ -857,8 +858,8 @@
return HEAP->undefined_value();
}
do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
+ ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
context_list.Sort(&Variable::CompareIndex);
@@ -927,28 +928,32 @@
// It works in context of ZoneScope.
class ReferenceCollectorVisitor : public ObjectVisitor {
public:
- explicit ReferenceCollectorVisitor(Code* original)
- : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) {
+ ReferenceCollectorVisitor(Code* original, Zone* zone)
+ : original_(original),
+ rvalues_(10, zone),
+ reloc_infos_(10, zone),
+ code_entries_(10, zone),
+ zone_(zone) {
}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (*p == original_) {
- rvalues_.Add(p);
+ rvalues_.Add(p, zone_);
}
}
}
virtual void VisitCodeEntry(Address entry) {
if (Code::GetObjectFromEntryAddress(entry) == original_) {
- code_entries_.Add(entry);
+ code_entries_.Add(entry, zone_);
}
}
virtual void VisitCodeTarget(RelocInfo* rinfo) {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
- reloc_infos_.Add(*rinfo);
+ reloc_infos_.Add(*rinfo, zone_);
}
}
@@ -977,6 +982,7 @@
ZoneList<Object**> rvalues_;
ZoneList<RelocInfo> reloc_infos_;
ZoneList<Address> code_entries_;
+ Zone* zone_;
};
@@ -990,7 +996,7 @@
// A zone scope for ReferenceCollectorVisitor.
ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
- ReferenceCollectorVisitor visitor(original);
+ ReferenceCollectorVisitor visitor(original, Isolate::Current()->zone());
// Iterate over all roots. Stack frames may have pointer into original code,
// so temporary replace the pointers with offset numbers
@@ -1592,11 +1598,12 @@
// Fills result array with statuses of functions. Modifies the stack
// removing all listed function if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
+ Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
+ Zone* zone) {
Isolate* isolate = Isolate::Current();
Debug* debug = isolate->debug();
ZoneScope scope(isolate, DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap();
+ Vector<StackFrame*> frames = CreateStackMap(zone);
int array_len = Smi::cast(shared_info_array->length())->value();
@@ -1723,7 +1730,7 @@
Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop) {
+ Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
int len = Smi::cast(shared_info_array->length())->value();
Handle<JSArray> result = FACTORY->NewJSArray(len);
@@ -1748,7 +1755,7 @@
// Try to drop activations from the current stack.
const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop);
+ DropActivationsInActiveThread(shared_info_array, result, do_drop, zone);
if (error_message != NULL) {
// Add error message as an array extra element.
Vector<const char> vector_message(error_message, StrLength(error_message));
@@ -1776,9 +1783,11 @@
void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
+ Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
+ Zone* zone) {
if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope());
+ isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope(),
+ zone);
}
}
diff --git a/src/liveedit.h b/src/liveedit.h
index 4ee4466..424c24e 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -69,7 +69,7 @@
explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
~LiveEditFunctionTracker();
void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
- FunctionLiteral* lit);
+ FunctionLiteral* lit, Zone* zone);
void RecordRootFunctionInfo(Handle<Code> code);
static bool IsActive(Isolate* isolate);
@@ -121,7 +121,7 @@
// has to restart the lowest found frames and drop all other frames above
// if possible and if do_drop is true.
static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop);
+ Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8e149fe..878c974 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1883,6 +1883,17 @@
enum_cache);
}
+ // TODO(verwaest): Make sure we free unused transitions.
+ if (descriptors->elements_transition_map() != NULL) {
+ Object** transitions_slot = descriptors->GetTransitionsSlot();
+ Object* transitions = *transitions_slot;
+ base_marker()->MarkObjectAndPush(
+ reinterpret_cast<HeapObject*>(transitions));
+ mark_compact_collector()->RecordSlot(descriptor_start,
+ transitions_slot,
+ transitions);
+ }
+
// If the descriptor contains a transition (value is a Map), we don't mark the
// value as live. It might be set to the NULL_DESCRIPTOR in
// ClearNonLiveTransitions later.
@@ -1921,12 +1932,6 @@
MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
}
break;
- case ELEMENTS_TRANSITION:
- // For maps with multiple elements transitions, the transition maps are
- // stored in a FixedArray. Keep the fixed array alive but not the maps
- // that it refers to.
- if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
- break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
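
This collector change mirrors the layout change made later in this patch: the elements transition map moves out of per-property ELEMENTS_TRANSITION descriptor entries into a single fixed slot on the descriptor array, so the marker visits that one slot instead of special-casing a FixedArray of transition maps (which is why the ELEMENTS_TRANSITION case is deleted). The slot-marking idiom, restated as a sketch:

    // Sketch: keep the transition map alive and record its slot so the
    // compactor can update the pointer if the map moves.
    if (descriptors->elements_transition_map() != NULL) {
      Object** slot = descriptors->GetTransitionsSlot();
      base_marker()->MarkObjectAndPush(HeapObject::cast(*slot));
      mark_compact_collector()->RecordSlot(descriptor_start, slot, *slot);
    }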
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 3ed794a..263656e 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -817,10 +817,11 @@
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
+ : isolate()->factory()->undefined_value(),
+ zone());
break;
case Variable::PARAMETER:
@@ -877,12 +878,12 @@
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
+ globals_->Add(function, zone());
break;
}
@@ -936,8 +937,8 @@
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
@@ -1611,7 +1612,7 @@
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -4529,14 +4530,55 @@
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
STATIC_ASSERT(0 == kSmiTag);
__ Addu(a1, a1, Operand(a1)); // Convert to smi.
+
+ // Store result register while executing finally block.
+ __ push(a1);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ li(at, Operand(pending_message_obj));
+ __ lw(a1, MemOperand(at));
+ __ push(a1);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ li(at, Operand(has_pending_message));
+ __ lw(a1, MemOperand(at));
+ __ push(a1);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ li(at, Operand(pending_message_script));
+ __ lw(a1, MemOperand(at));
__ push(a1);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(a1));
+ // Restore pending message from stack.
+ __ pop(a1);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ li(at, Operand(pending_message_script));
+ __ sw(a1, MemOperand(at));
+
+ __ pop(a1);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ li(at, Operand(has_pending_message));
+ __ sw(a1, MemOperand(at));
+
+ __ pop(a1);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ li(at, Operand(pending_message_obj));
+ __ sw(a1, MemOperand(at));
+
// Restore result register from stack.
__ pop(a1);
+
// Uncook return address and return.
__ pop(result_register());
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
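
EnterFinallyBlock and ExitFinallyBlock must stay exactly symmetric: whatever EnterFinallyBlock pushes (the cooked result, then the three pending-message externals) ExitFinallyBlock pops in strict reverse order, so code running inside the finally body cannot clobber a message that was pending when the block was entered. The invariant as a schematic, with the MIPS specifics stripped out (helper names illustrative):

    // Enter: save result, then message object, flag, and script (LIFO).
    push(result);
    push(*pending_message_obj_address);
    push(*has_pending_message_address);
    push(*pending_message_script_address);
    // ... finally body runs; it may throw and overwrite these externals ...
    // Exit: restore the externals in reverse order, then the result.
    *pending_message_script_address = pop();
    *has_pending_message_address    = pop();
    *pending_message_obj_address    = pop();
    result = pop();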
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 6257515..67dbe69 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -592,14 +592,14 @@
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
@@ -678,7 +678,7 @@
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -724,14 +724,14 @@
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
+ safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ safepoint.DefinePointerRegister(cp, zone());
}
}
@@ -743,7 +743,7 @@
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -2044,7 +2044,7 @@
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
@@ -2315,12 +2315,12 @@
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsFound() && lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2332,9 +2332,23 @@
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ HeapObject* current = HeapObject::cast((*type)->prototype());
+ Heap* heap = type->GetHeap();
+ while (current != heap->null_value()) {
+ Handle<HeapObject> link(current);
+ __ LoadHeapObject(result, link);
+ __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
+ DeoptimizeIf(ne, env,
+ result, Operand(Handle<Map>(JSObject::cast(current)->map())));
+ current = HeapObject::cast(current->map()->prototype());
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
@@ -2342,7 +2356,7 @@
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
+ Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
@@ -2353,17 +2367,25 @@
}
Handle<String> name = instr->hydrogen()->name();
Label done;
- __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label check_passed;
+ __ CompareMapAndBranch(
+ object_map, map, &check_passed,
+ eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
- __ Branch(&next, ne, scratch, Operand(map));
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&next);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
__ Branch(&done);
__ bind(&next);
}
@@ -2510,8 +2532,13 @@
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ And(scratch, result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ }
}
}
@@ -3025,7 +3052,7 @@
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
@@ -3216,7 +3243,7 @@
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(f0));
@@ -3777,7 +3804,7 @@
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
ToRegister(instr->index()),
@@ -3831,7 +3858,7 @@
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3907,7 +3934,7 @@
Register dst = ToRegister(instr->result());
Register overflow = scratch0();
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTagCheckOverflow(dst, src, overflow);
__ BranchOnOverflow(deferred->entry(), overflow);
__ bind(deferred->exit());
@@ -3975,7 +4002,7 @@
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
@@ -4169,7 +4196,7 @@
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Let the deferred code handle the HeapObject case.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -4420,7 +4447,8 @@
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
@@ -5065,7 +5093,7 @@
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt();
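
Every deferred-code object in this file now comes from the compiler's Zone via placement new rather than the C++ heap; zone memory is released wholesale when compilation finishes, so there is no matching delete. The idiom, assuming the ZoneObject-style operator new overload used elsewhere in V8:

    // Sketch: zone placement-new for LDeferredCode-like objects.
    class ZoneAllocated {
     public:
      void* operator new(size_t size, Zone* zone) { return zone->New(size); }
      // Deliberately no operator delete: the zone frees everything in bulk.
    };

    // Usage, as in DoStackCheck above (illustrative):
    //   DeferredStackCheck* d = new(zone()) DeferredStackCheck(this, instr);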
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 94bb945..32a696b 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -43,22 +43,26 @@
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
+ Zone* zone)
: chunk_(chunk),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- deopt_jump_table_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, zone),
+ deopt_jump_table_(4, zone),
+ deoptimization_literals_(8, zone),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- deferred_(8),
+ translations_(zone),
+ deferred_(8, zone),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ safepoints_(zone),
+ zone_(zone),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -71,6 +75,7 @@
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -173,7 +178,7 @@
void Abort(const char* format, ...);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -329,7 +334,8 @@
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -370,6 +376,8 @@
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+ Zone* zone_;
+
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
index 4a5fbe3..87efae5 100644
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -35,7 +35,7 @@
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
- moves_(32),
+ moves_(32, owner->zone()),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
@@ -80,7 +80,7 @@
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index d0f86cd..842001d 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -411,9 +411,9 @@
: spill_slot_count_(0),
info_(info),
graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) {
}
@@ -427,9 +427,9 @@
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
+ return LStackSlot::Create(index, zone());
}
}
@@ -474,23 +474,23 @@
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
- instructions_.Add(gap);
+ instructions_.Add(gap, zone());
index = instructions_.length();
- instructions_.Add(instr);
+ instructions_.Add(instr, zone());
} else {
index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
+ instructions_.Add(instr, zone());
+ instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
+ pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
+ return LConstantOperand::Create(constant->id(), zone());
}
@@ -529,7 +529,8 @@
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+ GetGapAt(index)->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(from, to, zone());
}
@@ -762,7 +763,7 @@
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -985,7 +986,8 @@
hydrogen_env->parameter_count(),
argument_count_,
value_count,
- outer);
+ outer,
+ zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1595,7 +1597,8 @@
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result = new LDateField(object, FixedTemp(a1), instr->index());
+ LDateField* result =
+ new(zone()) LDateField(object, FixedTemp(a1), instr->index());
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1644,10 +1647,9 @@
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegisterAtStart(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
- if (!needs_check) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+ if (instr->value()->type().IsSmi()) {
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@@ -2110,8 +2112,8 @@
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new(zone()) LAllocateObject(
- TempRegister(), TempRegister());
+ LAllocateObject* result =
+ new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index c3065e3..e21c921 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -332,8 +332,10 @@
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -2233,9 +2235,11 @@
}
void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
+ inlined_closures_.Add(closure, zone());
}
+ Zone* zone() const { return graph_->zone(); }
+
private:
int spill_slot_count_;
CompilationInfo* info_;
@@ -2252,7 +2256,7 @@
: chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->isolate()->zone()),
+ zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 6cd5e97..51b3a38 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3473,6 +3473,16 @@
Label* branch_to,
CompareMapMode mode) {
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
+}
+
+
+void MacroAssembler::CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to,
+ CompareMapMode mode) {
Operand right = Operand(map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
@@ -3481,15 +3491,15 @@
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
+ current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
- Branch(early_success, eq, scratch, right);
+ Branch(early_success, eq, obj_map, right);
right = Operand(Handle<Map>(current_map));
}
}
}
- Branch(branch_to, cond, scratch, right);
+ Branch(branch_to, cond, obj_map, right);
}
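
Splitting CompareMapAndBranch in two lets a polymorphic stub load the receiver's map once and test it against every candidate map without reloading, which is exactly how DoLoadNamedFieldPolymorphic uses it earlier in this patch. Usage sketch (register names illustrative, mirroring the real call):

    // Load the map once...
    __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // ...then compare against each candidate without touching object_map.
    Label check_passed;
    __ CompareMapAndBranch(object_map, map, &check_passed,
                           eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);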
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 1766866..bb3dc01 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -992,6 +992,15 @@
Label* branch_to,
CompareMapMode mode = REQUIRE_EXACT_MAP);
+ // As above, but the object's map is already loaded into the given register,
+ // and the generated code preserves it.
+ void CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 2a45293..21d1ce1 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -119,8 +119,10 @@
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -781,7 +783,7 @@
for (int i = 0; i < num_saved_registers_; i += 2) {
__ lw(a2, register_location(i));
__ lw(a3, register_location(i + 1));
- if (global()) {
+ if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in t7 for the zero-length check later.
__ mov(t7, a2);
}
@@ -823,17 +825,22 @@
// Prepare a0 to initialize registers with its value in the next run.
__ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Special case for zero-length matches.
- // t7: capture start index
- // Not a zero-length match, restart.
- __ Branch(
- &load_char_start_regexp, ne, current_input_offset(), Operand(t7));
- // Offset from the end is zero if we already reached the end.
- __ Branch(&exit_label_, eq, current_input_offset(), Operand(zero_reg));
- // Advance current position after a zero-length match.
- __ Addu(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t7: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(
+ &load_char_start_regexp, ne, current_input_offset(), Operand(t7));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ __ Addu(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
__ Branch(&load_char_start_regexp);
} else {
__ li(v0, Operand(SUCCESS));
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index d3f3bdd..d3fff0d 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -47,7 +47,7 @@
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerMIPS();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
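
The zero-length special case exists because a global regexp whose pattern can match the empty string would otherwise retry forever at the same offset: after such a match the engine must advance by one character (two bytes in UC16) before restarting, and only patterns flagged by global_with_zero_length_check() pay for that extra code. The loop the generated code implements, as C-flavoured pseudocode (not emitted instructions):

    while (matched) {
      StoreCaptures();
      if (global_with_zero_length_check && current_pos == capture_start) {
        if (AtEndOfInput()) break;               // nothing left to scan
        current_pos += (mode == UC16) ? 2 : 1;   // step past the empty match
      }
      matched = TryMatchFrom(current_pos);
    }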
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 700eacc..967ce4a 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -422,21 +422,59 @@
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
// a0 : value.
Label exit;
+
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+ // We need an extra register: push name_reg and reuse it as a scratch below.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
@@ -459,14 +497,14 @@
if (!transition.is_null()) {
// Update the map of the object.
- __ li(scratch, Operand(transition));
- __ sw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ __ li(scratch1, Operand(transition));
+ __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
- scratch,
+ scratch1,
name_reg,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
@@ -485,7 +523,7 @@
__ sw(a0, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit, scratch);
+ __ JumpIfSmi(a0, &exit, scratch1);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
@@ -493,15 +531,16 @@
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array.
- __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(a0, FieldMemOperand(scratch, offset));
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ sw(a0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(a0, &exit);
@@ -509,7 +548,7 @@
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, a0);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -1283,8 +1322,9 @@
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -2570,7 +2610,13 @@
Label miss;
// Name register might be clobbered.
- GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ a1, a2, a3, t0,
+ &miss);
__ bind(&miss);
__ li(a2, Operand(Handle<String>(name))); // Restore name.
Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
@@ -2626,6 +2672,52 @@
}
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ Handle<String> name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(a0);
+
+ // Call the JavaScript setter with the receiver and the value on the stack.
+ __ push(a1);
+ __ push(a0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(v0);
+
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> receiver,
Handle<String> name) {
@@ -2793,6 +2885,44 @@
}
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(a0, &miss);
+ CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(a0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
@@ -3109,7 +3239,13 @@
// a3 is used as scratch register. a1 and a2 keep their values if a jump to
// the miss label is generated.
- GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ a2, a1, a3, t0,
+ &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
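
GenerateStoreField's new preamble keeps transitioning stores honest: it looks the property up first, bails to the miss label on read-only or non-cacheable hits (the stub cannot tell sloppy callers from strict ones, so the generic path must decide whether to throw), and for map transitions walks the prototype chain so no inherited setter or read-only property is silently bypassed. Reduced to its control flow (a sketch; the real code also spills name_reg around the walk):

    LookupResult lookup(masm->isolate());
    object->Lookup(*name, &lookup);
    if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
      __ jmp(miss_label);  // let the generic IC apply the right semantics
      return;
    }
    if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
      // Verify every map between the receiver and the holder (or chain end).
      CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder),
                      name_reg, scratch1, scratch2, name, miss_label);
    }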
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index b4df6c8..5aac503 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -684,6 +684,7 @@
VerifyPointer(name());
VerifyPointer(data());
VerifyPointer(flag());
+ VerifyPointer(expected_receiver_type());
}
@@ -928,21 +929,6 @@
return false;
}
break;
- case ELEMENTS_TRANSITION: {
- Object* object = GetValue(i);
- if (!CheckOneBackPointer(current_map, object)) {
- return false;
- }
- if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
- for (int i = 0; i < array->length(); ++i) {
- if (!CheckOneBackPointer(current_map, array->get(i))) {
- return false;
- }
- }
- }
- break;
- }
case CALLBACKS: {
Object* object = GetValue(i);
if (object->IsAccessorPair()) {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 0620e0e..da1a35b 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1267,30 +1267,25 @@
if (current_kind == FAST_HOLEY_ELEMENTS) return this;
Heap* heap = GetHeap();
Object* the_hole = heap->the_hole_value();
- Object* heap_number_map = heap->heap_number_map();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
} else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
- HeapObject::cast(current)->map() == heap_number_map &&
- IsFastSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_DOUBLE_ELEMENTS;
- }
- } else {
- if (!current->IsNumber()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ if (IsFastSmiElementsKind(target_kind)) {
if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
- break;
+ target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
} else {
- target_kind = FAST_ELEMENTS;
+ target_kind = FAST_DOUBLE_ELEMENTS;
}
}
+ } else if (is_holey) {
+ target_kind = FAST_HOLEY_ELEMENTS;
+ break;
+ } else {
+ target_kind = FAST_ELEMENTS;
}
}
}
@@ -1611,13 +1606,23 @@
}
-int JSObject::MaxFastProperties() {
+bool JSObject::TooManyFastProperties(int properties,
+ JSObject::StoreFromKeyed store_mode) {
// Allow extra fast properties if the object has more than
- // kMaxFastProperties in-object properties. When this is the case,
+ // kFastPropertiesSoftLimit in-object properties. When this is the case,
// it is very unlikely that the object is being used as a dictionary
// and there is a good chance that allowing more map transitions
// will be worth it.
- return Max(map()->inobject_properties(), kMaxFastProperties);
+ int inobject = map()->inobject_properties();
+
+ int limit;
+ if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ||
+ map()->used_for_prototype()) {
+ limit = Max(inobject, kMaxFastProperties);
+ } else {
+ limit = Max(inobject, kFastPropertiesSoftLimit);
+ }
+ return properties > limit;
}
@@ -1871,9 +1876,14 @@
bool DescriptorArray::IsEmpty() {
ASSERT(this->IsSmi() ||
- this->length() > kFirstIndex ||
+ this->MayContainTransitions() ||
this == HEAP->empty_descriptor_array());
- return this->IsSmi() || length() <= kFirstIndex;
+ return this->IsSmi() || length() < kFirstIndex;
+}
+
+
+bool DescriptorArray::MayContainTransitions() {
+ return length() >= kTransitionsIndex;
}
@@ -1883,7 +1893,7 @@
}
void DescriptorArray::set_bit_field3_storage(int value) {
- ASSERT(!IsEmpty());
+ ASSERT(this->MayContainTransitions());
WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
}
@@ -1907,7 +1917,7 @@
// Fast case: do linear search for small arrays.
const int kMaxElementsForLinearSearch = 8;
if (StringShape(name).IsSymbol() && nof < kMaxElementsForLinearSearch) {
- return LinearSearch(name, nof);
+ return LinearSearch(EXPECT_SORTED, name, nof);
}
// Slow case: perform binary search.
@@ -1925,6 +1935,30 @@
}
+Map* DescriptorArray::elements_transition_map() {
+ if (!this->MayContainTransitions()) {
+ return NULL;
+ }
+ Object* transition_map = get(kTransitionsIndex);
+ if (transition_map == Smi::FromInt(0)) {
+ return NULL;
+ } else {
+ return Map::cast(transition_map);
+ }
+}
+
+
+void DescriptorArray::set_elements_transition_map(
+ Map* transition_map, WriteBarrierMode mode) {
+ ASSERT(this->length() > kTransitionsIndex);
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kTransitionsOffset, transition_map);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kTransitionsOffset, transition_map, mode);
+ ASSERT(DescriptorArray::cast(this));
+}
+
+
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
@@ -2010,7 +2044,6 @@
switch (GetType(descriptor_number)) {
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
return true;
case CALLBACKS: {
Object* value = GetValue(descriptor_number);
@@ -2949,6 +2982,20 @@
}
+void Map::set_used_for_prototype(bool value) {
+ if (value) {
+ set_bit_field3(bit_field3() | (1 << kUsedForPrototype));
+ } else {
+ set_bit_field3(bit_field3() & ~(1 << kUsedForPrototype));
+ }
+}
+
+
+bool Map::used_for_prototype() {
+ return ((1 << kUsedForPrototype) & bit_field3()) != 0;
+}
+
+
JSFunction* Map::unchecked_constructor() {
return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
}
@@ -3452,6 +3499,16 @@
}
+Map* Map::elements_transition_map() {
+ return instance_descriptors()->elements_transition_map();
+}
+
+
+void Map::set_elements_transition_map(Map* transitioned_map) {
+ return instance_descriptors()->set_elements_transition_map(transitioned_map);
+}
+
+
void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
Heap* heap = GetHeap();
ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
@@ -3511,10 +3568,7 @@
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction,
- next_function_link,
- Object,
- kNextFunctionLinkOffset)
+ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3527,6 +3581,8 @@
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
+ACCESSORS(AccessorInfo, expected_receiver_type, Object,
+ kExpectedReceiverTypeOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -3617,7 +3673,7 @@
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -3666,8 +3722,10 @@
SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
+SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
+SMI_ACCESSORS(SharedFunctionInfo,
+ stress_deopt_counter,
+ kStressDeoptCounterOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -3719,8 +3777,10 @@
kThisPropertyAssignmentsCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, counters, kCountersOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
+ stress_deopt_counter,
+ kStressDeoptCounterOffset)
#endif
@@ -3921,12 +3981,64 @@
}
+int SharedFunctionInfo::ic_age() {
+ return ICAgeBits::decode(counters());
+}
+
+
+void SharedFunctionInfo::set_ic_age(int ic_age) {
+ set_counters(ICAgeBits::update(counters(), ic_age));
+}
+
+
+int SharedFunctionInfo::deopt_count() {
+ return DeoptCountBits::decode(counters());
+}
+
+
+void SharedFunctionInfo::set_deopt_count(int deopt_count) {
+ set_counters(DeoptCountBits::update(counters(), deopt_count));
+}
+
+
+void SharedFunctionInfo::increment_deopt_count() {
+ int value = counters();
+ int deopt_count = DeoptCountBits::decode(value);
+ deopt_count = (deopt_count + 1) & DeoptCountBits::kMax;
+ set_counters(DeoptCountBits::update(value, deopt_count));
+}
+
+
+int SharedFunctionInfo::opt_reenable_tries() {
+ return OptReenableTriesBits::decode(counters());
+}
+
+
+void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
+ set_counters(OptReenableTriesBits::update(counters(), tries));
+}
+
+
bool SharedFunctionInfo::has_deoptimization_support() {
Code* code = this->code();
return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
}
+void SharedFunctionInfo::TryReenableOptimization() {
+ int tries = opt_reenable_tries();
+ set_opt_reenable_tries((tries + 1) & OptReenableTriesBits::kMax);
+ // We reenable optimization whenever the number of tries is a large
+ // enough power of 2.
+ if (tries >= 16 && (((tries - 1) & tries) == 0)) {
+ set_optimization_disabled(false);
+ set_opt_count(0);
+ set_deopt_count(0);
+ code()->set_optimizable(true);
+ }
+}
+
+
bool JSFunction::IsBuiltin() {
return context()->global()->IsJSBuiltinsObject();
}
@@ -4049,15 +4161,12 @@
maps->set(kind, current_map);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
- ElementsKind transitioned_kind = GetFastElementsKindFromSequenceIndex(i);
- MaybeObject* maybe_new_map = current_map->CopyDropTransitions();
- Map* new_map = NULL;
- if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
- new_map->set_elements_kind(transitioned_kind);
- maybe_new_map = current_map->AddElementsTransition(transitioned_kind,
- new_map);
- if (maybe_new_map->IsFailure()) return maybe_new_map;
- maps->set(transitioned_kind, new_map);
+ Map* new_map;
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
+ MaybeObject* maybe_new_map =
+ current_map->CreateNextElementsTransition(next_kind);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ maps->set(next_kind, new_map);
current_map = new_map;
}
global_context->set_js_array_maps(maps);
@@ -4099,6 +4208,7 @@
return instance_prototype();
}
+
bool JSFunction::should_have_prototype() {
return map()->function_with_prototype();
}
@@ -4737,6 +4847,13 @@
}
+bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
+ Object* function_template = expected_receiver_type();
+ if (!function_template->IsFunctionTemplateInfo()) return true;
+ return receiver->IsInstanceOf(FunctionTemplateInfo::cast(function_template));
+}
+
+
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
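
SharedFunctionInfo folds what used to be two integer fields (ic_age and deopt_counter) plus a new retry counter into a single counters Smi, sliced up with BitField, and TryReenableOptimization uses the retry counter as an exponential backoff: optimization comes back only once the try count reaches a sufficiently large power of two. The BitField pattern, with illustrative widths (the real layout is defined in objects.h, not shown in this hunk):

    // Sketch: packing three counters into one Smi-sized integer.
    class ICAgeBits            : public BitField<int, 0,  8> {};
    class DeoptCountBits       : public BitField<int, 8,  8> {};
    class OptReenableTriesBits : public BitField<int, 16, 14> {};

    int counters = 0;
    counters = DeoptCountBits::update(counters, 5);  // write one sub-field
    int deopts = DeoptCountBits::decode(counters);   // deopts == 5
    // ((tries - 1) & tries) == 0 tests for a power of two, as used above.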
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 3aea5f0..b886168 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -273,25 +273,6 @@
descs->GetCallbacksObject(i)->ShortPrint(out);
PrintF(out, " (callback)\n");
break;
- case ELEMENTS_TRANSITION: {
- PrintF(out, "(elements transition to ");
- Object* descriptor_contents = descs->GetValue(i);
- if (descriptor_contents->IsMap()) {
- Map* map = Map::cast(descriptor_contents);
- PrintElementsKind(out, map->elements_kind());
- } else {
- FixedArray* map_array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < map_array->length(); ++i) {
- Map* map = Map::cast(map_array->get(i));
- if (i != 0) {
- PrintF(out, ", ");
- }
- PrintElementsKind(out, map->elements_kind());
- }
- }
- PrintF(out, ")\n");
- break;
- }
case MAP_TRANSITION:
PrintF(out, "(map transition)\n");
break;
@@ -438,6 +419,9 @@
PrintF(out,
"]\n - prototype = %p\n",
reinterpret_cast<void*>(GetPrototype()));
+ PrintF(out,
+ " - elements transition to = %p\n",
+ reinterpret_cast<void*>(map()->elements_transition_map()));
PrintF(out, " {\n");
PrintProperties(out);
PrintElements(out);
@@ -559,6 +543,8 @@
prototype()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
+ PrintF(out, "\n - code cache: ");
+ code_cache()->ShortPrint(out);
PrintF(out, "\n");
}
diff --git a/src/objects.cc b/src/objects.cc
index 49ab970..d3e6492 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -178,6 +178,16 @@
// api style callbacks.
if (structure->IsAccessorInfo()) {
AccessorInfo* data = AccessorInfo::cast(structure);
+ if (!data->IsCompatibleReceiver(receiver)) {
+ Handle<Object> name_handle(name);
+ Handle<Object> receiver_handle(receiver);
+ Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("incompatible_method_receiver",
+ HandleVector(args,
+ ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope(isolate);
@@ -629,7 +639,6 @@
recvr, name, attributes);
}
case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
break;
@@ -1514,7 +1523,8 @@
MaybeObject* JSObject::AddFastProperty(String* name,
Object* value,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode) {
ASSERT(!IsJSGlobalProxy());
// Normalize the object if the name is an actual string (not the
@@ -1552,10 +1562,7 @@
// Element transitions are stored in the descriptor for property "", which is
// not an identifier and should have forced a switch to slow properties above.
- ASSERT(descriptor_index == DescriptorArray::kNotFound ||
- old_descriptors->GetType(descriptor_index) != ELEMENTS_TRANSITION);
- bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound ||
- old_descriptors->GetType(descriptor_index) == ELEMENTS_TRANSITION;
+ bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound;
bool allow_map_transition =
can_insert_transition &&
(isolate->context()->global_context()->object_function()->map() != map());
@@ -1580,7 +1587,7 @@
}
if (map()->unused_property_fields() == 0) {
- if (properties()->length() > MaxFastProperties()) {
+ if (TooManyFastProperties(properties()->length(), store_mode)) {
Object* obj;
{ MaybeObject* maybe_obj =
NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
@@ -1601,7 +1608,9 @@
}
// We have now allocated all the necessary objects.
// All the changes can be applied at once, so they are atomic.
- map()->set_instance_descriptors(old_descriptors);
+ if (allow_map_transition) {
+ map()->set_instance_descriptors(old_descriptors);
+ }
new_map->SetBackPointer(map());
new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
set_map(new_map);
@@ -1710,7 +1719,8 @@
MaybeObject* JSObject::AddProperty(String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -1733,7 +1743,7 @@
JSFunction::cast(value),
attributes);
} else {
- return AddFastProperty(name, value, attributes);
+ return AddFastProperty(name, value, attributes, store_mode);
}
} else {
// Normalize the object to prevent very large instance descriptors.
@@ -1762,14 +1772,11 @@
// found. Use set property to handle all these cases.
return SetProperty(&result, name, value, attributes, strict_mode);
}
- bool found = false;
+ bool done = false;
MaybeObject* result_object;
- result_object = SetPropertyWithCallbackSetterInPrototypes(name,
- value,
- attributes,
- &found,
- strict_mode);
- if (found) return result_object;
+ result_object =
+ SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ if (done) return result_object;
// Add a new real property.
return AddProperty(name, value, attributes, strict_mode);
}
@@ -1833,7 +1840,7 @@
Object* new_value,
PropertyAttributes attributes) {
if (map()->unused_property_fields() == 0 &&
- properties()->length() > MaxFastProperties()) {
+ TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
Object* obj;
{ MaybeObject* maybe_obj =
NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
@@ -1948,10 +1955,11 @@
MaybeObject* JSReceiver::SetProperty(String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode) {
LookupResult result(GetIsolate());
LocalLookup(name, &result);
- return SetProperty(&result, name, value, attributes, strict_mode);
+ return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
}
@@ -1984,6 +1992,16 @@
if (structure->IsAccessorInfo()) {
// api style callbacks
AccessorInfo* data = AccessorInfo::cast(structure);
+ if (!data->IsCompatibleReceiver(this)) {
+ Handle<Object> name_handle(name);
+ Handle<Object> receiver_handle(this);
+ Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("incompatible_method_receiver",
+ HandleVector(args,
+ ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
@@ -2049,26 +2067,6 @@
}
-void JSObject::LookupCallbackSetterInPrototypes(String* name,
- LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype()) {
- if (pt->IsJSProxy()) {
- return result->HandlerResult(JSProxy::cast(pt));
- }
- JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
- // Found non-callback or read-only callback, stop looking.
- break;
- }
- }
- result->NotFound();
-}
-
-
MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
uint32_t index,
Object* value,
@@ -2085,7 +2083,7 @@
*found = true; // Force abort
return maybe;
}
- return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
+ return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
this, name, value, NONE, strict_mode, found);
}
if (!JSObject::cast(pt)->HasDictionaryElements()) {
@@ -2110,45 +2108,60 @@
return heap->the_hole_value();
}
-MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
+MaybeObject* JSObject::SetPropertyViaPrototypes(
String* name,
Object* value,
PropertyAttributes attributes,
- bool* found,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ bool* done) {
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
+
+ *done = false;
// We could not find a local property, so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result(heap->isolate());
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsFound()) {
- *found = true;
- if (accessor_result.type() == CALLBACKS) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder(),
- strict_mode);
- } else if (accessor_result.type() == HANDLER) {
- // There is a proxy in the prototype chain. Invoke its
- // getPropertyDescriptor trap.
- bool found = false;
- // SetPropertyWithHandlerIfDefiningSetter can cause GC,
- // make sure to use the handlified references after calling
- // the function.
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> hvalue(value);
- MaybeObject* result =
- accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
- this, name, value, attributes, strict_mode, &found);
- if (found) return result;
- // The proxy does not define the property as an accessor.
- // Consequently, it has no effect on setting the receiver.
- return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
+ // accessor that wants to handle the property, or whether the property is
+ // read-only on the prototype chain.
+ LookupResult result(isolate);
+ LookupRealNamedPropertyInPrototypes(name, &result);
+ if (result.IsFound()) {
+ switch (result.type()) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ *done = result.IsReadOnly();
+ break;
+ case INTERCEPTOR: {
+ PropertyAttributes attr =
+ result.holder()->GetPropertyAttributeWithInterceptor(
+ this, name, true);
+ *done = !!(attr & READ_ONLY);
+ break;
+ }
+ case CALLBACKS: {
+ if (!FLAG_es5_readonly && result.IsReadOnly()) break;
+ *done = true;
+ return SetPropertyWithCallback(result.GetCallbackObject(),
+ name, value, result.holder(), strict_mode);
+ }
+ case HANDLER: {
+ return result.proxy()->SetPropertyViaPrototypesWithHandler(
+ this, name, value, attributes, strict_mode, done);
+ }
+ case MAP_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR:
+ break;
}
}
- *found = false;
+
+ // If we get here with *done true, we have encountered a read-only property.
+ if (!FLAG_es5_readonly) *done = false;
+ if (*done) {
+ if (strict_mode == kNonStrictMode) return value;
+ Handle<Object> args[] = { Handle<Object>(name), Handle<Object>(this)};
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ }
return heap->the_hole_value();
}
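
SetPropertyViaPrototypes replaces the old setter-only scan: a store can now be stopped by any read-only data property on the prototype chain (gated by FLAG_es5_readonly), not just by a callback setter, and the strict/sloppy outcome is decided in one place. The outcome per kind of prototype hit, summarized as comments:

    // Sketch of the decision table implemented above.
    // NORMAL / FIELD / CONSTANT_FUNCTION : blocks iff the property is read-only
    // INTERCEPTOR                        : blocks iff attributes say READ_ONLY
    // CALLBACKS                          : runs the setter (done = true)
    // HANDLER (proxy)                    : delegated to the proxy trap
    // transitions / NULL_DESCRIPTOR      : never block a store
    //
    // Blocked + strict mode  -> TypeError("strict_read_only_property")
    // Blocked + sloppy mode  -> store silently dropped, value returned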
@@ -2207,9 +2220,8 @@
if (IsTransitionableFastElementsKind(kind)) {
while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) {
kind = GetNextMoreGeneralFastElementsKind(kind, false);
- bool dummy = true;
Handle<Map> maybe_transitioned_map =
- MaybeNull(current_map->LookupElementsTransitionMap(kind, &dummy));
+ MaybeNull(current_map->LookupElementsTransitionMap(kind));
if (maybe_transitioned_map.is_null()) break;
if (ContainsMap(candidates, maybe_transitioned_map) &&
(packed || !IsFastPackedElementsKind(kind))) {
@@ -2223,207 +2235,68 @@
}
-static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
- ElementsKind elements_kind) {
- if (descriptor_contents->IsMap()) {
- Map* map = Map::cast(descriptor_contents);
- if (map->elements_kind() == elements_kind) {
- return map;
+static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
+ Map* current_map = map;
+ int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
+ int to_index = GetSequenceIndexFromFastElementsKind(to_kind);
+ for (; index < to_index; ++index) {
+ Map* next_map = current_map->elements_transition_map();
+ if (next_map == NULL) {
+ return current_map;
}
- return NULL;
+ current_map = next_map;
}
-
- FixedArray* map_array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < map_array->length(); ++i) {
- Object* current = map_array->get(i);
- // Skip undefined slots, they are sentinels for reclaimed maps.
- if (!current->IsUndefined()) {
- Map* current_map = Map::cast(map_array->get(i));
- if (current_map->elements_kind() == elements_kind) {
- return current_map;
- }
- }
- }
-
- return NULL;
+ ASSERT(current_map->elements_kind() == to_kind);
+ return current_map;
}
-static MaybeObject* AddElementsTransitionMapToDescriptor(
- Object* descriptor_contents,
- Map* new_map) {
- // Nothing was in the descriptor for an ELEMENTS_TRANSITION,
- // simply add the map.
- if (descriptor_contents == NULL) {
- return new_map;
- }
-
- // There was already a map in the descriptor, create a 2-element FixedArray
- // to contain the existing map plus the new one.
- FixedArray* new_array;
- Heap* heap = new_map->GetHeap();
- if (descriptor_contents->IsMap()) {
- // Must tenure, DescriptorArray expects no new-space objects.
- MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
- if (!maybe_new_array->To<FixedArray>(&new_array)) {
- return maybe_new_array;
- }
- new_array->set(0, descriptor_contents);
- new_array->set(1, new_map);
- return new_array;
- }
-
- // The descriptor already contained a list of maps for different ElementKinds
- // of ELEMENTS_TRANSITION, first check the existing array for an undefined
- // slot, and if that's not available, create a FixedArray to hold the existing
- // maps plus the new one and fill it in.
- FixedArray* array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < array->length(); ++i) {
- if (array->get(i)->IsUndefined()) {
- array->set(i, new_map);
- return array;
- }
- }
-
- // Must tenure, DescriptorArray expects no new-space objects.
- MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(array->length() + 1, TENURED);
- if (!maybe_new_array->To<FixedArray>(&new_array)) {
- return maybe_new_array;
- }
- int i = 0;
- while (i < array->length()) {
- new_array->set(i, array->get(i));
- ++i;
- }
- new_array->set(i, new_map);
- return new_array;
-}
-
-
-String* Map::elements_transition_sentinel_name() {
- return GetHeap()->empty_symbol();
-}
-
-
-Object* Map::GetDescriptorContents(String* sentinel_name,
- bool* safe_to_add_transition) {
- // Get the cached index for the descriptors lookup, or find and cache it.
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
- int index = cache->Lookup(descriptors, sentinel_name);
- if (index == DescriptorLookupCache::kAbsent) {
- index = descriptors->Search(sentinel_name);
- cache->Update(descriptors, sentinel_name, index);
- }
- // If the transition already exists, return its descriptor.
- if (index != DescriptorArray::kNotFound) {
- PropertyDetails details = descriptors->GetDetails(index);
- if (details.type() == ELEMENTS_TRANSITION) {
- return descriptors->GetValue(index);
- } else {
- if (safe_to_add_transition != NULL) {
- *safe_to_add_transition = false;
- }
+Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
+ if (this->instance_descriptors()->MayContainTransitions() &&
+ IsMoreGeneralElementsKindTransition(this->elements_kind(), to_kind)) {
+ Map* to_map = FindClosestElementsTransition(this, to_kind);
+ if (to_map->elements_kind() == to_kind) {
+ return to_map;
}
}
return NULL;
}
-Map* Map::LookupElementsTransitionMap(ElementsKind to_kind,
- bool* safe_to_add_transition) {
- ElementsKind from_kind = elements_kind();
- if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
- if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
- if (safe_to_add_transition) *safe_to_add_transition = false;
- return NULL;
- }
- ElementsKind transitioned_from_kind =
- GetNextMoreGeneralFastElementsKind(from_kind, false);
+MaybeObject* Map::CreateNextElementsTransition(ElementsKind next_kind) {
+ ASSERT(elements_transition_map() == NULL);
+ ASSERT(GetSequenceIndexFromFastElementsKind(elements_kind()) ==
+ (GetSequenceIndexFromFastElementsKind(next_kind) - 1));
+ Map* next_map;
+ MaybeObject* maybe_next_map =
+ this->CopyDropTransitions(DescriptorArray::CANNOT_BE_SHARED);
+ if (!maybe_next_map->To(&next_map)) return maybe_next_map;
- // If the transition is a single step in the transition sequence, fall
- // through to looking it up and returning it. If it requires several steps,
- // divide and conquer.
- if (transitioned_from_kind != to_kind) {
- // If the transition is several steps in the lattice, divide and conquer.
- Map* from_map = LookupElementsTransitionMap(transitioned_from_kind,
- safe_to_add_transition);
- if (from_map == NULL) return NULL;
- return from_map->LookupElementsTransitionMap(to_kind,
- safe_to_add_transition);
- }
- }
- Object* descriptor_contents = GetDescriptorContents(
- elements_transition_sentinel_name(), safe_to_add_transition);
- if (descriptor_contents != NULL) {
- Map* maybe_transition_map =
- GetElementsTransitionMapFromDescriptor(descriptor_contents,
- to_kind);
- ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
- return maybe_transition_map;
- }
- return NULL;
+ next_map->set_elements_kind(next_kind);
+ next_map->SetBackPointer(this);
+ this->set_elements_transition_map(next_map);
+ return next_map;
}
-MaybeObject* Map::AddElementsTransition(ElementsKind to_kind,
- Map* transitioned_map) {
- ElementsKind from_kind = elements_kind();
- if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
- ASSERT(IsMoreGeneralElementsKindTransition(from_kind, to_kind));
- ElementsKind transitioned_from_kind =
- GetNextMoreGeneralFastElementsKind(from_kind, false);
- // The map transitions graph should be a tree, therefore transitions to
- // ElementsKind that are not adjacent in the ElementsKind sequence are not
- // done directly, but instead by going through intermediate ElementsKinds
- // first.
- if (to_kind != transitioned_from_kind) {
- bool safe_to_add = true;
- Map* intermediate_map = LookupElementsTransitionMap(
- transitioned_from_kind, &safe_to_add);
- // This method is only called when safe_to_add has been found to be true
- // earlier.
- ASSERT(safe_to_add);
+static MaybeObject* AddMissingElementsTransitions(Map* map,
+ ElementsKind to_kind) {
+ int index = GetSequenceIndexFromFastElementsKind(map->elements_kind()) + 1;
+ int to_index = GetSequenceIndexFromFastElementsKind(to_kind);
+ ASSERT(index <= to_index);
- if (intermediate_map == NULL) {
- MaybeObject* maybe_map = CopyDropTransitions();
- if (!maybe_map->To(&intermediate_map)) return maybe_map;
- intermediate_map->set_elements_kind(transitioned_from_kind);
- MaybeObject* maybe_transition = AddElementsTransition(
- transitioned_from_kind, intermediate_map);
- if (maybe_transition->IsFailure()) return maybe_transition;
- }
- return intermediate_map->AddElementsTransition(to_kind, transitioned_map);
- }
+ Map* current_map = map;
+
+ for (; index <= to_index; ++index) {
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index);
+ MaybeObject* maybe_next_map =
+ current_map->CreateNextElementsTransition(next_kind);
+ if (!maybe_next_map->To(&current_map)) return maybe_next_map;
}
- bool safe_to_add_transition = true;
- Object* descriptor_contents = GetDescriptorContents(
- elements_transition_sentinel_name(), &safe_to_add_transition);
- // This method is only called when safe_to_add_transition has been found
- // to be true earlier.
- ASSERT(safe_to_add_transition);
- MaybeObject* maybe_new_contents =
- AddElementsTransitionMapToDescriptor(descriptor_contents,
- transitioned_map);
- Object* new_contents;
- if (!maybe_new_contents->ToObject(&new_contents)) {
- return maybe_new_contents;
- }
-
- ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
- new_contents);
- Object* new_descriptors;
- MaybeObject* maybe_new_descriptors =
- instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- transitioned_map->SetBackPointer(this);
- return this;
+ ASSERT(current_map->elements_kind() == to_kind);
+ return current_map;
}
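
Taken together, FindClosestElementsTransition and AddMissingElementsTransitions replace the old descriptor-array bookkeeping with a singly linked chain of maps ordered by generality: lookup walks as far as the chain reaches, and creation appends only the missing tail. A self-contained sketch of the same walk-then-extend idea, with plain structs standing in for maps and sequence indices standing in for ElementsKind:

#include <cassert>
#include <vector>

// Stand-in for Map: each node knows its position in the fast-elements-kind
// sequence and the next, more general transition (if any).
struct Node {
  int kind_index;
  Node* next;
};

// Mirrors FindClosestElementsTransition: walk as far as the chain reaches.
Node* FindClosest(Node* start, int to_index) {
  Node* current = start;
  while (current->kind_index < to_index && current->next != nullptr) {
    current = current->next;
  }
  return current;
}

// Mirrors AddMissingElementsTransitions: append one node per missing step.
Node* AddMissing(Node* from, int to_index, std::vector<Node>& arena) {
  Node* current = from;
  for (int i = current->kind_index + 1; i <= to_index; ++i) {
    arena.push_back(Node{i, nullptr});
    current->next = &arena.back();
    current = current->next;
  }
  return current;
}

int main() {
  std::vector<Node> arena;
  arena.reserve(8);  // keep pointers stable for this sketch
  arena.push_back(Node{0, nullptr});
  Node* start = &arena.front();
  Node* closest = FindClosest(start, 3);
  assert(closest == start);                 // nothing cached yet
  Node* target = AddMissing(closest, 3, arena);
  assert(target->kind_index == 3);
  assert(FindClosest(start, 3) == target);  // now cached for reuse
  return 0;
}
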
@@ -2436,58 +2309,60 @@
}
+// If the map is using the empty descriptor array, install a new empty
+// descriptor array that will contain an element transition.
+// TODO(verwaest) Goes away once the descriptor array is immutable.
+static MaybeObject* EnsureMayContainTransitions(Map* map) {
+ if (map->instance_descriptors()->MayContainTransitions()) return map;
+ DescriptorArray* descriptor_array;
+ MaybeObject* maybe_descriptor_array =
+ DescriptorArray::Allocate(0, DescriptorArray::CANNOT_BE_SHARED);
+ if (!maybe_descriptor_array->To(&descriptor_array)) {
+ return maybe_descriptor_array;
+ }
+ map->set_instance_descriptors(descriptor_array);
+ return map;
+}
+
+
MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
- Map* current_map = map();
- ElementsKind from_kind = current_map->elements_kind();
+ Map* start_map = map();
+ ElementsKind from_kind = start_map->elements_kind();
- if (from_kind == to_kind) return current_map;
-
- // Only objects with FastProperties can have DescriptorArrays and can track
- // element-related maps. Also don't add descriptors to maps that are shared.
- bool safe_to_add_transition = HasFastProperties() &&
- !current_map->IsUndefined() &&
- !current_map->is_shared();
-
- // Prevent long chains of DICTIONARY -> FAST_*_ELEMENTS maps caused by objects
- // with elements that switch back and forth between dictionary and fast
- // element modes.
- if (from_kind == DICTIONARY_ELEMENTS &&
- IsFastElementsKind(to_kind)) {
- safe_to_add_transition = false;
+ if (from_kind == to_kind) {
+ return start_map;
}
- if (safe_to_add_transition) {
- // It's only safe to manipulate the descriptor array if it would be
- // safe to add a transition.
- Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
- to_kind, &safe_to_add_transition);
- if (maybe_transition_map != NULL) {
- return maybe_transition_map;
- }
- }
-
- Map* new_map = NULL;
-
- // No transition to an existing map for the given ElementsKind. Make a new
- // one.
- { MaybeObject* maybe_map = current_map->CopyDropTransitions();
- if (!maybe_map->To(&new_map)) return maybe_map;
- }
-
- new_map->set_elements_kind(to_kind);
-
- // Only remember the map transition if the object's map is NOT equal to the
- // global object_function's map and there is not an already existing
- // non-matching element transition.
Context* global_context = GetIsolate()->context()->global_context();
- bool allow_map_transition = safe_to_add_transition &&
- (global_context->object_function()->map() != map());
- if (allow_map_transition) {
- MaybeObject* maybe_transition =
- current_map->AddElementsTransition(to_kind, new_map);
- if (maybe_transition->IsFailure()) return maybe_transition;
+ bool allow_store_transition =
+ // Only remember the map transition if the object's map is NOT equal to
+ // the global object_function's map and there is not an already existing
+ // non-matching element transition.
+ (global_context->object_function()->map() != map()) &&
+ !start_map->IsUndefined() && !start_map->is_shared() &&
+ // Only store fast element maps in ascending generality.
+ IsTransitionableFastElementsKind(from_kind) &&
+ IsFastElementsKind(to_kind) &&
+ IsMoreGeneralElementsKindTransition(from_kind, to_kind);
+
+ if (!allow_store_transition) {
+ // Create a new free-floating map only if we are not allowed to store it.
+ Map* new_map = NULL;
+ MaybeObject* maybe_new_map =
+ start_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ new_map->set_elements_kind(to_kind);
+ return new_map;
}
- return new_map;
+
+ EnsureMayContainTransitions(start_map);
+ Map* closest_map = FindClosestElementsTransition(start_map, to_kind);
+
+ if (closest_map->elements_kind() == to_kind) {
+ return closest_map;
+ }
+
+ return AddMissingElementsTransitions(closest_map, to_kind);
}
@@ -2553,9 +2428,13 @@
Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
- pt = JSObject::cast(pt)->GetPrototype()) {
+ pt = pt->GetPrototype()) {
+ if (pt->IsJSProxy()) {
+ return result->HandlerResult(JSProxy::cast(pt));
+ }
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
+ ASSERT(!(result->IsProperty() && result->type() == INTERCEPTOR));
+ if (result->IsProperty()) return;
}
result->NotFound();
}
@@ -2569,7 +2448,7 @@
bool check_prototype,
StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
- LookupCallbackSetterInPrototypes(name, result);
+ LookupRealNamedPropertyInPrototypes(name, result);
}
if (result->IsProperty()) {
@@ -2622,13 +2501,14 @@
String* key,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode) {
if (result->IsFound() && result->type() == HANDLER) {
return result->proxy()->SetPropertyWithHandler(
this, key, value, attributes, strict_mode);
} else {
return JSObject::cast(this)->SetPropertyForResult(
- result, key, value, attributes, strict_mode);
+ result, key, value, attributes, strict_mode, store_mode);
}
}
@@ -2642,7 +2522,7 @@
Handle<Object> args[] = { name };
Handle<Object> result = CallTrap(
"has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return false;
return result->ToBoolean()->IsTrue();
}
@@ -2668,79 +2548,92 @@
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
JSReceiver* receiver_raw,
String* name_raw,
Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- bool* found) {
- *found = true; // except where defined otherwise...
- Isolate* isolate = GetHeap()->isolate();
- Handle<JSReceiver> receiver(receiver_raw);
+ bool* done) {
+ Isolate* isolate = GetIsolate();
Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Handle<JSReceiver> receiver(receiver_raw);
Handle<String> name(name_raw);
Handle<Object> value(value_raw);
+ Handle<Object> handler(this->handler()); // Trap might morph proxy.
+
+ *done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Failure::Exception();
- if (!result->IsUndefined()) {
- // The proxy handler cares about this property.
- // Check whether it is virtualized as an accessor.
- // Emulate [[GetProperty]] semantics for proxies.
- bool has_pending_exception;
- Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- Handle<String> conf_name =
- isolate->factory()->LookupAsciiSymbol("configurable_");
- Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
- ASSERT(!isolate->has_pending_exception());
- if (configurable->IsFalse()) {
- Handle<String> trap =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
- Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- ASSERT(configurable->IsTrue());
-
- // Check for AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
- Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
- ASSERT(!isolate->has_pending_exception());
- if (!setter->IsUndefined()) {
- // We have a setter -- invoke it.
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return receiver->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
- } else {
- Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
- Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
- ASSERT(!isolate->has_pending_exception());
- if (!getter->IsUndefined()) {
- // We have a getter but no setter -- the property may not be
- // written. In strict mode, throw an error.
- if (strict_mode == kNonStrictMode) return *value;
- Handle<Object> args[] = { name, proxy };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- }
- // Fall-through.
+ if (result->IsUndefined()) {
+ *done = false;
+ return GetHeap()->the_hole_value();
}
- // The proxy does not define the property as an accessor.
- *found = false;
- return *value;
+ // Emulate [[GetProperty]] semantics for proxies.
+ bool has_pending_exception;
+ Handle<Object> argv[] = { result };
+ Handle<Object> desc =
+ Execution::Call(isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
+
+ // [[GetProperty]] requires to check that all properties are configurable.
+ Handle<String> configurable_name =
+ isolate->factory()->LookupAsciiSymbol("configurable_");
+ Handle<Object> configurable(
+ v8::internal::GetProperty(desc, configurable_name));
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(configurable->IsTrue() || configurable->IsFalse());
+ if (configurable->IsFalse()) {
+ Handle<String> trap =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> args[] = { handler, trap, name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
+ ASSERT(configurable->IsTrue());
+
+ // Check for DataDescriptor.
+ Handle<String> hasWritable_name =
+ isolate->factory()->LookupAsciiSymbol("hasWritable_");
+ Handle<Object> hasWritable(v8::internal::GetProperty(desc, hasWritable_name));
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
+ if (hasWritable->IsTrue()) {
+ Handle<String> writable_name =
+ isolate->factory()->LookupAsciiSymbol("writable_");
+ Handle<Object> writable(v8::internal::GetProperty(desc, writable_name));
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(writable->IsTrue() || writable->IsFalse());
+ *done = writable->IsFalse();
+ if (!*done) return GetHeap()->the_hole_value();
+ if (strict_mode == kNonStrictMode) return *value;
+ Handle<Object> args[] = { name, receiver };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
+
+ // We have an AccessorDescriptor.
+ Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+ Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+ ASSERT(!isolate->has_pending_exception());
+ if (!setter->IsUndefined()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return receiver->SetPropertyWithDefinedSetter(
+ JSReceiver::cast(*setter), *value);
+ }
+
+ if (strict_mode == kNonStrictMode) return *value;
+ Handle<Object> args2[] = { name, proxy };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
+ return isolate->Throw(*error);
}
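
The rewritten trap reads as a pure function over a completed property descriptor: non-configurable descriptors are rejected outright, data descriptors block the store exactly when non-writable, and accessor descriptors either invoke their setter or throw. A hedged sketch of that decision logic over a hypothetical plain-struct descriptor:

#include <cassert>

// Hypothetical completed descriptor, modeling the configurable_/hasWritable_/
// writable_/set_ fields read above.
struct Descriptor {
  bool configurable;
  bool has_writable;  // true => data descriptor
  bool writable;
  bool has_setter;    // accessor descriptor with a setter
};

enum Outcome { kProceed, kCallSetter, kRejectReadOnly, kRejectNoSetter,
               kRejectNotConfigurable };

Outcome DecideProxyStore(const Descriptor& desc) {
  if (!desc.configurable) return kRejectNotConfigurable;
  if (desc.has_writable) {  // data descriptor
    return desc.writable ? kProceed : kRejectReadOnly;
  }
  return desc.has_setter ? kCallSetter : kRejectNoSetter;
}

int main() {
  assert(DecideProxyStore({true, true, true, false}) == kProceed);
  assert(DecideProxyStore({true, true, false, false}) == kRejectReadOnly);
  assert(DecideProxyStore({true, false, false, true}) == kCallSetter);
  assert(DecideProxyStore({false, true, true, false}) ==
         kRejectNotConfigurable);
  return 0;
}
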
@@ -2898,7 +2791,8 @@
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -2929,26 +2823,19 @@
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetPropertyForResult(
- result, name, value, attributes, strict_mode);
+ result, name, value, attributes, strict_mode, store_mode);
}
if (!result->IsProperty() && !IsJSContextExtensionObject()) {
- bool found = false;
- MaybeObject* result_object;
- result_object = SetPropertyWithCallbackSetterInPrototypes(name,
- value,
- attributes,
- &found,
- strict_mode);
- if (found) return result_object;
+ bool done = false;
+ MaybeObject* result_object =
+ SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ if (done) return result_object;
}
- // At this point, no GC should have happened, as this would invalidate
- // 'result', which we cannot handlify!
-
if (!result->IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes, strict_mode);
+ return AddProperty(name, value, attributes, strict_mode, store_mode);
}
if (result->IsReadOnly() && result->IsProperty()) {
if (strict_mode == kStrictMode) {
@@ -3015,7 +2902,6 @@
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
}
case NULL_DESCRIPTOR:
- case ELEMENTS_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case HANDLER:
UNREACHABLE();
@@ -3113,9 +2999,7 @@
case CONSTANT_TRANSITION:
// Replace with a MAP_TRANSITION to a new map with a FIELD, even
// if the value is a function.
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case NULL_DESCRIPTOR:
- case ELEMENTS_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case HANDLER:
UNREACHABLE();
@@ -3274,14 +3158,20 @@
Map::cast(result)->SharedMapVerify();
}
if (FLAG_enable_slow_asserts) {
- // The cached map should match newly created normalized map bit-by-bit.
+ // The cached map should match newly created normalized map bit-by-bit,
+ // except for the code cache, which can contain some ics which can be
+ // applied to the shared map.
Object* fresh;
{ MaybeObject* maybe_fresh =
fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
if (maybe_fresh->ToObject(&fresh)) {
ASSERT(memcmp(Map::cast(fresh)->address(),
Map::cast(result)->address(),
- Map::kSize) == 0);
+ Map::kCodeCacheOffset) == 0);
+ int offset = Map::kCodeCacheOffset + kPointerSize;
+ ASSERT(memcmp(Map::cast(fresh)->address() + offset,
+ Map::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
}
}
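
The slow assert above compares two maps bit-for-bit while allowing exactly one pointer-sized field (the code cache) to differ, by bracketing the field with two memcmp calls. A minimal illustration of the technique over an arbitrary struct; the names here are illustrative, not V8's:

#include <cassert>
#include <cstddef>
#include <cstring>

struct Record {
  int a;
  void* cache;  // the one field allowed to differ
  int b;
};

// Equal everywhere except the 'cache' member, mirroring the two-part memcmp
// around Map::kCodeCacheOffset above.
bool EqualExceptCache(const Record& x, const Record& y) {
  size_t off = offsetof(Record, cache);
  size_t after = off + sizeof(void*);
  return memcmp(&x, &y, off) == 0 &&
         memcmp(reinterpret_cast<const char*>(&x) + after,
                reinterpret_cast<const char*>(&y) + after,
                sizeof(Record) - after) == 0;
}

int main() {
  Record r1, r2;
  memset(&r1, 0, sizeof r1);       // make padding bytes deterministic
  r1.a = 1;
  r1.b = 2;
  memcpy(&r2, &r1, sizeof r1);     // identical bytes, padding included
  r2.cache = &r1;                  // differs only in the skipped field
  assert(EqualExceptCache(r1, r2));
  r2.b = 3;
  assert(!EqualExceptCache(r1, r2));
  return 0;
}
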
@@ -3406,7 +3296,6 @@
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
case INTERCEPTOR:
- case ELEMENTS_TRANSITION:
break;
case HANDLER:
case NORMAL:
@@ -4192,7 +4081,8 @@
// Do a map transition, other objects with this map may still
// be extensible.
Map* new_map;
- { MaybeObject* maybe = map()->CopyDropTransitions();
+ { MaybeObject* maybe =
+ map()->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
if (!maybe->To<Map>(&new_map)) return maybe;
}
new_map->set_is_extensible(false);
@@ -4972,7 +4862,8 @@
JSFunction* ctor = JSFunction::cast(constructor());
Object* descriptors;
{ MaybeObject* maybe_descriptors =
- ctor->initial_map()->instance_descriptors()->RemoveTransitions();
+ ctor->initial_map()->instance_descriptors()->RemoveTransitions(
+ DescriptorArray::MAY_BE_SHARED);
if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
}
Map::cast(result)->set_instance_descriptors(
@@ -5012,6 +4903,7 @@
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
Map::cast(result)->set_bit_field3(bit_field3());
+ Map::cast(result)->set_code_cache(code_cache());
Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
@@ -5025,20 +4917,22 @@
}
-MaybeObject* Map::CopyDropTransitions() {
+MaybeObject* Map::CopyDropTransitions(
+ DescriptorArray::SharedMode shared_mode) {
Object* new_map;
{ MaybeObject* maybe_new_map = CopyDropDescriptors();
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
}
Object* descriptors;
{ MaybeObject* maybe_descriptors =
- instance_descriptors()->RemoveTransitions();
+ instance_descriptors()->RemoveTransitions(shared_mode);
if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
}
cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
return new_map;
}
+
void Map::UpdateCodeCache(Handle<Map> map,
Handle<String> name,
Handle<Code> code) {
@@ -5048,6 +4942,8 @@
}
MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
+ ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
+
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
@@ -5098,11 +4994,13 @@
void Start() {
ASSERT(!IsIterating());
- if (HasDescriptors()) *DescriptorArrayHeader() = Smi::FromInt(0);
+ if (descriptor_array_->MayContainTransitions())
+ *DescriptorArrayHeader() = Smi::FromInt(0);
}
bool IsIterating() {
- return HasDescriptors() && (*DescriptorArrayHeader())->IsSmi();
+ return descriptor_array_->MayContainTransitions() &&
+ (*DescriptorArrayHeader())->IsSmi();
}
Map* Next() {
@@ -5120,7 +5018,6 @@
switch (details.type()) {
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
// We definitely have a map transition.
*DescriptorArrayHeader() = Smi::FromInt(raw_index + 2);
return static_cast<Map*>(descriptor_array_->GetValue(index));
@@ -5154,15 +5051,18 @@
break;
}
}
+ if (index == descriptor_array_->number_of_descriptors()) {
+ Map* elements_transition = descriptor_array_->elements_transition_map();
+ if (elements_transition != NULL) {
+ *DescriptorArrayHeader() = Smi::FromInt(index + 1);
+ return elements_transition;
+ }
+ }
*DescriptorArrayHeader() = descriptor_array_->GetHeap()->fixed_array_map();
return NULL;
}
private:
- bool HasDescriptors() {
- return descriptor_array_->length() > DescriptorArray::kFirstIndex;
- }
-
Object** DescriptorArrayHeader() {
return HeapObject::RawField(descriptor_array_, DescriptorArray::kMapOffset);
}
@@ -5851,23 +5751,30 @@
#endif
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
+MaybeObject* DescriptorArray::Allocate(int number_of_descriptors,
+ SharedMode shared_mode) {
Heap* heap = Isolate::Current()->heap();
- if (number_of_descriptors == 0) {
- return heap->empty_descriptor_array();
- }
- // Allocate the array of keys.
- Object* array;
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
- if (!maybe_array->ToObject(&array)) return maybe_array;
- }
// Do not use DescriptorArray::cast on incomplete object.
- FixedArray* result = FixedArray::cast(array);
-
+ FixedArray* result;
+ if (number_of_descriptors == 0) {
+ if (shared_mode == MAY_BE_SHARED) {
+ return heap->empty_descriptor_array();
+ }
+ { MaybeObject* maybe_array =
+ heap->AllocateFixedArray(kTransitionsIndex + 1);
+ if (!maybe_array->To(&result)) return maybe_array;
+ }
+ } else {
+ // Allocate the array of keys.
+ { MaybeObject* maybe_array =
+ heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+ if (!maybe_array->To(&result)) return maybe_array;
+ }
+ result->set(kEnumerationIndexIndex,
+ Smi::FromInt(PropertyDetails::kInitialIndex));
+ }
result->set(kBitField3StorageIndex, Smi::FromInt(0));
- result->set(kEnumerationIndexIndex,
- Smi::FromInt(PropertyDetails::kInitialIndex));
+ result->set(kTransitionsIndex, Smi::FromInt(0));
return result;
}
@@ -5969,7 +5876,8 @@
}
DescriptorArray* new_descriptors;
- { MaybeObject* maybe_result = Allocate(new_size);
+ { SharedMode mode = remove_transitions ? MAY_BE_SHARED : CANNOT_BE_SHARED;
+ MaybeObject* maybe_result = Allocate(new_size, mode);
if (!maybe_result->To(&new_descriptors)) return maybe_result;
}
@@ -5986,6 +5894,10 @@
++enumeration_index;
}
}
+ Map* old_elements_transition = elements_transition_map();
+ if ((!remove_transitions) && (old_elements_transition != NULL)) {
+ new_descriptors->set_elements_transition_map(old_elements_transition);
+ }
new_descriptors->SetNextEnumerationIndex(enumeration_index);
// Copy the descriptors, filtering out transitions and null descriptors,
@@ -6009,6 +5921,8 @@
}
}
if (insertion_index < 0) insertion_index = to_index++;
+
+ ASSERT(insertion_index < new_descriptors->number_of_descriptors());
new_descriptors->Set(insertion_index, descriptor, witness);
ASSERT(to_index == new_descriptors->number_of_descriptors());
@@ -6018,14 +5932,15 @@
}
-MaybeObject* DescriptorArray::RemoveTransitions() {
+MaybeObject* DescriptorArray::RemoveTransitions(SharedMode shared_mode) {
// Allocate the new descriptor array.
int new_number_of_descriptors = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
if (IsProperty(i)) new_number_of_descriptors++;
}
DescriptorArray* new_descriptors;
- { MaybeObject* maybe_result = Allocate(new_number_of_descriptors);
+ { MaybeObject* maybe_result = Allocate(new_number_of_descriptors,
+ shared_mode);
if (!maybe_result->To(&new_descriptors)) return maybe_result;
}
@@ -6110,42 +6025,37 @@
int DescriptorArray::BinarySearch(String* name, int low, int high) {
uint32_t hash = name->Hash();
+ int limit = high;
- while (low <= high) {
+ ASSERT(low <= high);
+
+ while (low != high) {
int mid = (low + high) / 2;
String* mid_name = GetKey(mid);
uint32_t mid_hash = mid_name->Hash();
- if (mid_hash > hash) {
- high = mid - 1;
- continue;
- }
- if (mid_hash < hash) {
+ if (mid_hash >= hash) {
+ high = mid;
+ } else {
low = mid + 1;
- continue;
}
- // Found an element with the same hash-code.
- ASSERT(hash == mid_hash);
- // There might be more, so we find the first one and
- // check them all to see if we have a match.
- if (name == mid_name && !IsNullDescriptor(mid)) return mid;
- while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
- for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
- if (GetKey(mid)->Equals(name) && !IsNullDescriptor(mid)) return mid;
- }
- break;
}
+
+ for (; low <= limit && GetKey(low)->Hash() == hash; ++low) {
+ if (GetKey(low)->Equals(name) && !IsNullDescriptor(low))
+ return low;
+ }
+
return kNotFound;
}
-int DescriptorArray::LinearSearch(String* name, int len) {
+int DescriptorArray::LinearSearch(SearchMode mode, String* name, int len) {
uint32_t hash = name->Hash();
for (int number = 0; number < len; number++) {
String* entry = GetKey(number);
- if ((entry->Hash() == hash) &&
- name->Equals(entry) &&
- !IsNullDescriptor(number)) {
+ if (mode == EXPECT_SORTED && entry->Hash() > hash) break;
+ if (name->Equals(entry) && !IsNullDescriptor(number)) {
return number;
}
}
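
The new BinarySearch is a classic lower bound: shrink the [low, high) interval to the first slot whose hash is greater than or equal to the target, then scan the run of equal hashes for an exact key match; the sorted-mode LinearSearch uses the same ordering to stop early. A self-contained sketch of the lower-bound step over plain integers:

#include <cassert>
#include <vector>

// Lower-bound search mirroring the loop above: returns the index of the
// first element in 'sorted' whose value is >= 'hash', or sorted.size().
size_t LowerBound(const std::vector<unsigned>& sorted, unsigned hash) {
  size_t low = 0, high = sorted.size();
  while (low != high) {
    size_t mid = (low + high) / 2;
    if (sorted[mid] >= hash) {
      high = mid;  // keep mid: it may be the first match
    } else {
      low = mid + 1;
    }
  }
  return low;
}

int main() {
  std::vector<unsigned> hashes = {3, 7, 7, 7, 12};
  size_t i = LowerBound(hashes, 7);
  assert(i == 1);  // first of the run of equal hashes
  // Callers then walk i forward while hashes[i] == 7, comparing full keys,
  // exactly as the loop after the binary search does above.
  assert(LowerBound(hashes, 13) == hashes.size());
  return 0;
}
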
@@ -7424,25 +7334,6 @@
case CONSTANT_TRANSITION:
keep_entry = !ClearBackPointer(heap, d->GetValue(i));
break;
- case ELEMENTS_TRANSITION: {
- Object* object = d->GetValue(i);
- if (object->IsMap()) {
- keep_entry = !ClearBackPointer(heap, object);
- } else {
- FixedArray* array = FixedArray::cast(object);
- for (int j = 0; j < array->length(); ++j) {
- Object* target = array->get(j);
- if (target->IsMap()) {
- if (ClearBackPointer(heap, target)) {
- array->set_undefined(j);
- } else {
- keep_entry = true;
- }
- }
- }
- }
- break;
- }
case CALLBACKS: {
Object* object = d->GetValue(i);
if (object->IsAccessorPair()) {
@@ -7609,14 +7500,64 @@
}
+MaybeObject* JSObject::OptimizeAsPrototype() {
+ if (IsGlobalObject()) return this;
+
+ // Make sure prototypes are fast objects and their maps have the bit set
+ // so they remain fast.
+ Map* proto_map = map();
+ if (!proto_map->used_for_prototype()) {
+ if (!HasFastProperties()) {
+ MaybeObject* new_proto = TransformToFastProperties(0);
+ if (new_proto->IsFailure()) return new_proto;
+ ASSERT(new_proto == this);
+ proto_map = map();
+ if (!proto_map->is_shared()) {
+ proto_map->set_used_for_prototype(true);
+ }
+ } else {
+ Heap* heap = GetHeap();
+ // We use the hole value as a singleton key in the prototype transition
+ // map so that we don't multiply the number of maps unnecessarily.
+ Map* new_map =
+ proto_map->GetPrototypeTransition(heap->the_hole_value());
+ if (new_map == NULL) {
+ MaybeObject* maybe_new_map =
+ proto_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
+ if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
+ new_map->set_used_for_prototype(true);
+ MaybeObject* ok =
+ proto_map->PutPrototypeTransition(heap->the_hole_value(),
+ new_map);
+ if (ok->IsFailure()) return ok;
+ }
+ ASSERT(!proto_map->is_shared() && !new_map->is_shared());
+ set_map(new_map);
+ }
+ }
+ return this;
+}
+
+
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSReceiver());
Heap* heap = GetHeap();
+
+ // First some logic for the map of the prototype to make sure the
+ // used_for_prototype flag is set.
+ if (value->IsJSObject()) {
+ MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
+ if (ok->IsFailure()) return ok;
+ }
+
+ // Now some logic for the maps of the objects that are created by using this
+ // function as a constructor.
if (has_initial_map()) {
// If the function has allocated the initial map
// replace it with a copy containing the new prototype.
Map* new_map;
- MaybeObject* maybe_new_map = initial_map()->CopyDropTransitions();
+ MaybeObject* maybe_new_map =
+ initial_map()->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
new_map->set_prototype(value);
MaybeObject* maybe_object =
@@ -7646,7 +7587,8 @@
// Remove map transitions because they point to maps with a
// different prototype.
Map* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
+ { MaybeObject* maybe_new_map =
+ map()->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
}
Heap* heap = new_map->GetHeap();
@@ -8041,6 +7983,7 @@
code()->set_optimizable(true);
}
set_opt_count(0);
+ set_deopt_count(0);
}
}
@@ -8326,6 +8269,11 @@
}
+bool Code::allowed_in_shared_map_code_cache() {
+ return is_keyed_load_stub() || is_keyed_store_stub();
+}
+
+
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
@@ -8513,7 +8461,6 @@
case HANDLER: return "HANDLER";
case INTERCEPTOR: return "INTERCEPTOR";
case MAP_TRANSITION: return "MAP_TRANSITION";
- case ELEMENTS_TRANSITION: return "ELEMENTS_TRANSITION";
case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
}
@@ -8783,7 +8730,7 @@
}
-Object* Map::GetPrototypeTransition(Object* prototype) {
+Map* Map::GetPrototypeTransition(Object* prototype) {
FixedArray* cache = prototype_transitions();
int number_of_transitions = NumberOfProtoTransitions();
const int proto_offset =
@@ -8793,8 +8740,7 @@
for (int i = 0; i < number_of_transitions; i++) {
if (cache->get(proto_offset + i * step) == prototype) {
Object* map = cache->get(map_offset + i * step);
- ASSERT(map->IsMap());
- return map;
+ return Map::cast(map);
}
}
return NULL;
@@ -8902,21 +8848,27 @@
// Nothing to do if prototype is already set.
if (map->prototype() == value) return value;
- Object* new_map = map->GetPrototypeTransition(value);
+ if (value->IsJSObject()) {
+ MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
+ if (ok->IsFailure()) return ok;
+ }
+
+ Map* new_map = map->GetPrototypeTransition(value);
if (new_map == NULL) {
- { MaybeObject* maybe_new_map = map->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+ { MaybeObject* maybe_new_map =
+ map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
}
{ MaybeObject* maybe_new_cache =
- map->PutPrototypeTransition(value, Map::cast(new_map));
+ map->PutPrototypeTransition(value, new_map);
if (maybe_new_cache->IsFailure()) return maybe_new_cache;
}
- Map::cast(new_map)->set_prototype(value);
+ new_map->set_prototype(value);
}
- ASSERT(Map::cast(new_map)->prototype() == value);
- real_receiver->set_map(Map::cast(new_map));
+ ASSERT(new_map->prototype() == value);
+ real_receiver->set_map(new_map);
heap->ClearInstanceofCache();
ASSERT(size == Size());
@@ -10071,6 +10023,12 @@
// Fall through.
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
+ if (IsJSArray()) {
+ *capacity = backing_store_base->length();
+ *used = Smi::cast(JSArray::cast(this)->length())->value();
+ break;
+ }
+ // Fall through if packing is not guaranteed.
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
@@ -10087,6 +10045,12 @@
break;
}
case FAST_DOUBLE_ELEMENTS:
+ if (IsJSArray()) {
+ *capacity = backing_store_base->length();
+ *used = Smi::cast(JSArray::cast(this)->length())->value();
+ break;
+ }
+ // Fall through if packing is not guaranteed.
case FAST_HOLEY_DOUBLE_ELEMENTS: {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
*capacity = elms->length();
@@ -11185,7 +11149,6 @@
switch (DetailsAt(entry).type()) {
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
return true;
case CALLBACKS: {
Object* value = ValueAt(entry);
@@ -12635,7 +12598,8 @@
// Allocate the instance descriptor.
DescriptorArray* descriptors;
{ MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length);
+ DescriptorArray::Allocate(instance_descriptor_length,
+ DescriptorArray::MAY_BE_SHARED);
if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
return maybe_descriptors;
}
diff --git a/src/objects.h b/src/objects.h
index 0696afa..9aac37f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -41,6 +41,7 @@
#include "mips/constants-mips.h"
#endif
#include "v8checks.h"
+#include "zone.h"
//
@@ -169,6 +170,14 @@
};
+// Indicates whether the search function should expect a sorted or an unsorted
+// array as input.
+enum SearchMode {
+ EXPECT_SORTED,
+ EXPECT_UNSORTED
+};
+
+
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
@@ -1353,6 +1362,13 @@
FORCE_DELETION
};
+ // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
+ // a keyed store is of the form a[expression] = foo.
+ enum StoreFromKeyed {
+ MAY_BE_STORE_FROM_KEYED,
+ CERTAINLY_NOT_STORE_FROM_KEYED
+ };
+
// Casting.
static inline JSReceiver* cast(Object* obj);
@@ -1362,15 +1378,19 @@
PropertyAttributes attributes,
StrictModeFlag strict_mode);
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetProperty(
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
+ MUST_USE_RESULT MaybeObject* SetProperty(
+ LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
Object* value);
@@ -1524,7 +1544,8 @@
String* key,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode);
MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
LookupResult* result,
String* name,
@@ -1583,6 +1604,8 @@
MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
DeleteMode mode);
+ MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
+
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
InterceptorInfo* GetIndexedInterceptor();
@@ -1837,7 +1860,6 @@
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
- void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
void LookupCallback(String* name, LookupResult* result);
@@ -1920,9 +1942,11 @@
PropertyAttributes attributes);
// Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(String* name,
- Object* value,
- PropertyAttributes attributes);
+ MUST_USE_RESULT MaybeObject* AddFastProperty(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
// Add a property to a slow-case object.
MUST_USE_RESULT MaybeObject* AddSlowProperty(String* name,
@@ -1930,10 +1954,12 @@
PropertyAttributes attributes);
// Add a property to an object.
- MUST_USE_RESULT MaybeObject* AddProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* AddProperty(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2053,7 +2079,7 @@
- // Tells whether the object has too many fast properties. Used to
+ // restrict the number of map transitions to avoid an explosion in the
+ // number of maps for objects used as dictionaries.
- inline int MaxFastProperties();
+ inline bool TooManyFastProperties(int properties, StoreFromKeyed store_mode);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
@@ -2075,7 +2101,8 @@
static const int kMaxUncheckedOldFastElementsLength = 500;
static const int kInitialMaxFastElementArray = 100000;
- static const int kMaxFastProperties = 12;
+ static const int kFastPropertiesSoftLimit = 12;
+ static const int kMaxFastProperties = 64;
static const int kMaxInstanceSize = 255 * kPointerSize;
// When extending the backing storage for property values, we increase
// its size by more than the 1 entry necessary, so sequentially adding fields
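
The constants above split the old single limit in two: keyed stores of the form a[expr] hit the 12-property soft limit, while named stores may grow an object to 64 fast properties before it is normalized into dictionary mode. The inline body of TooManyFastProperties is not part of this diff, so the following is only a plausible sketch of the two-tier check under that assumption:

#include <cassert>

const int kFastPropertiesSoftLimit = 12;
const int kMaxFastProperties = 64;

enum StoreFromKeyed { MAY_BE_STORE_FROM_KEYED, CERTAINLY_NOT_STORE_FROM_KEYED };

// Assumed logic: named stores like a.x tolerate up to the hard limit, while
// keyed stores like a[expr] -- the typical hash-map usage pattern -- hit the
// soft limit much earlier.
bool TooManyFastProperties(int properties, StoreFromKeyed store_mode) {
  int limit = (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED)
                  ? kMaxFastProperties
                  : kFastPropertiesSoftLimit;
  return properties > limit;
}

int main() {
  assert(!TooManyFastProperties(20, CERTAINLY_NOT_STORE_FROM_KEYED));
  assert(TooManyFastProperties(20, MAY_BE_STORE_FROM_KEYED));
  return 0;
}
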
@@ -2122,17 +2149,16 @@
bool check_prototype,
SetPropertyMode set_mode);
- // Searches the prototype chain for a callback setter and sets the property
- // with the setter if it finds one. The '*found' flag indicates whether
- // a setter was found or not.
- // This function can cause GC and can return a failure result with
- // '*found==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
+ // Searches the prototype chain for property 'name'. If it is found and
+ // has a setter, invoke it and set '*done' to true. If it is found and is
+ // read-only, reject and set '*done' to true. Otherwise, set '*done' to
+ // false. Can cause GC and can return a failure result with '*done==true'.
+ MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
String* name,
Object* value,
PropertyAttributes attributes,
- bool* found,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ bool* done);
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
@@ -2400,10 +2426,15 @@
// map uses to encode additional bit fields when the descriptor array is not
// yet used.
inline bool IsEmpty();
+ inline bool MayContainTransitions();
+
+ DECL_ACCESSORS(elements_transition_map, Map)
// Returns the number of descriptors in the array.
int number_of_descriptors() {
- ASSERT(length() > kFirstIndex || IsEmpty());
+ ASSERT(length() > kFirstIndex ||
+ length() == kTransitionsIndex ||
+ IsEmpty());
int len = length();
return len <= kFirstIndex ? 0 : (len - kFirstIndex) / kDescriptorSize;
}
@@ -2441,6 +2472,12 @@
kEnumerationIndexOffset);
}
+ Object** GetTransitionsSlot() {
+ ASSERT(elements_transition_map() != NULL);
+ return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+ kTransitionsOffset);
+ }
+
// TODO(1399): It should be possible to make room for bit_field3 in the map
// without overloading the instance descriptors field in the map
// (and storing it in the DescriptorArray when the map has one).
@@ -2517,9 +2554,16 @@
MUST_USE_RESULT MaybeObject* CopyInsert(Descriptor* descriptor,
TransitionFlag transition_flag);
+ // Indicates whether a descriptor array may end up shared between several
+ // maps, in which case it must not be modified in place.
+ enum SharedMode {
+ MAY_BE_SHARED,
+ CANNOT_BE_SHARED
+ };
+
// Return a copy of the array with all transitions and null descriptors
// removed. Return a Failure object in case of an allocation failure.
- MUST_USE_RESULT MaybeObject* RemoveTransitions();
+ MUST_USE_RESULT MaybeObject* RemoveTransitions(SharedMode shared_mode);
// Sort the instance descriptors by the hash codes of their keys.
// Does not check for duplicates.
@@ -2530,11 +2574,11 @@
void Sort(const WhitenessWitness&);
// Search the instance descriptors for given name.
- inline int Search(String* name);
+ INLINE(int Search(String* name));
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- inline int SearchWithCache(String* name);
+ INLINE(int SearchWithCache(String* name));
// Tells whether the name is present in the array.
bool Contains(String* name) { return kNotFound != Search(name); }
@@ -2547,12 +2591,13 @@
// Perform a linear search in the instance descriptors represented
// by this fixed array. len is the number of descriptor indices that are
- // valid. Does not require the descriptors to be sorted.
- int LinearSearch(String* name, int len);
+ // valid.
+ int LinearSearch(SearchMode mode, String* name, int len);
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors);
+ MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
+ SharedMode shared_mode);
// Casting.
static inline DescriptorArray* cast(Object* obj);
@@ -2561,8 +2606,9 @@
static const int kNotFound = -1;
static const int kBitField3StorageIndex = 0;
- static const int kEnumerationIndexIndex = 1;
- static const int kFirstIndex = 2;
+ static const int kTransitionsIndex = 1;
+ static const int kEnumerationIndexIndex = 2;
+ static const int kFirstIndex = 3;
// The length of the "bridge" to the enum cache.
static const int kEnumCacheBridgeLength = 3;
@@ -2572,8 +2618,8 @@
// Layout description.
static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
- static const int kEnumerationIndexOffset = kBitField3StorageOffset +
- kPointerSize;
+ static const int kTransitionsOffset = kBitField3StorageOffset + kPointerSize;
+ static const int kEnumerationIndexOffset = kTransitionsOffset + kPointerSize;
static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
// Layout description for the bridge array.
@@ -3426,7 +3472,7 @@
// must be a symbol (canonicalized).
int FunctionContextSlotIndex(String* name, VariableMode* mode);
- static Handle<ScopeInfo> Create(Scope* scope);
+ static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
// Serializes empty scope info.
static ScopeInfo* Empty();
@@ -4310,6 +4356,8 @@
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
+ bool allowed_in_shared_map_code_cache();
+
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -4662,13 +4710,11 @@
}
inline bool has_external_array_elements() {
- ElementsKind kind(elements_kind());
- return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ return IsExternalArrayElementsKind(elements_kind());
}
inline bool has_dictionary_elements() {
- return elements_kind() == DICTIONARY_ELEMENTS;
+ return IsDictionaryElementsKind(elements_kind());
}
inline bool has_slow_elements_kind() {
@@ -4679,6 +4725,9 @@
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
+ inline Map* elements_transition_map();
+ inline void set_elements_transition_map(Map* transitioned_map);
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -4689,9 +4738,16 @@
// behavior. If true, the map should never be modified, instead a clone
// should be created and modified.
inline void set_is_shared(bool value);
-
inline bool is_shared();
+ // Tells whether the map is used for an object that is a prototype of another
+ // object or the prototype property of a function. Such maps are made faster
+ // by tweaking the heuristics that distinguish regular object-oriented objects
+ // from objects used as hash maps. This flag is an optimization hint only; it
+ // does not affect correctness.
+ inline void set_used_for_prototype(bool value);
+ inline bool used_for_prototype();
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -4775,7 +4831,8 @@
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
- MUST_USE_RESULT MaybeObject* CopyDropTransitions();
+ MUST_USE_RESULT MaybeObject* CopyDropTransitions(
+ DescriptorArray::SharedMode shared_mode);
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -4828,23 +4885,15 @@
// The "shared" flags of both this map and |other| are ignored.
bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
- // Returns the contents of this map's descriptor array for the given string.
- // May return NULL. |safe_to_add_transition| is set to false and NULL
- // is returned if adding transitions is not allowed.
- Object* GetDescriptorContents(String* sentinel_name,
- bool* safe_to_add_transitions);
-
// Returns the map that this map transitions to if its elements_kind
// is changed to |elements_kind|, or NULL if no such map is cached yet.
- // |safe_to_add_transitions| is set to false if adding transitions is not
- // allowed.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind,
- bool* safe_to_add_transition);
+ Map* LookupElementsTransitionMap(ElementsKind elements_kind);
- // Adds an entry to this map's descriptor array for a transition to
- // |transitioned_map| when its elements_kind is changed to |elements_kind|.
- MUST_USE_RESULT MaybeObject* AddElementsTransition(
- ElementsKind elements_kind, Map* transitioned_map);
+ // Adds a new transition for changing the elements kind to |elements_kind|.
+ MUST_USE_RESULT MaybeObject* CreateNextElementsTransition(
+ ElementsKind elements_kind);
// Returns the transitioned map for this map with the most generic
// elements_kind that's found in |candidates|, or null handle if no match is
@@ -4880,9 +4929,18 @@
void TraverseTransitionTree(TraverseCallback callback, void* data);
+ // When you set the prototype of an object using the __proto__ accessor you
+ // need a new map for the object (the prototype is stored in the map). In
+ // order not to multiply maps unnecessarily we store these as transitions in
+ // the original map. That way we can transition to the same map if the same
+ // prototype is set, rather than creating a new map every time. The
+ // transitions are in the form of a map where the keys are prototype objects
+ // and the values are the maps that are transitioned to. The special key
+ // the_hole denotes the map we should transition to when the
+ // used_for_prototype flag is set.
static const int kMaxCachedPrototypeTransitions = 256;
- Object* GetPrototypeTransition(Object* prototype);
+ Map* GetPrototypeTransition(Object* prototype);
MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
Map* map);
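
As the comment explains, the prototype-transition cache is essentially a small association list from prototype objects to transitioned maps, with a sentinel key reserved for the used_for_prototype variant. A hedged sketch of that structure with ordinary pointers standing in for heap objects:

#include <cassert>
#include <utility>
#include <vector>

struct Proto {};    // stand-in for a prototype object
struct MapLike {};  // stand-in for a transitioned map

Proto hole_object;                          // plays the role of the_hole
Proto* const kHoleSentinel = &hole_object;  // singleton key, as above

struct PrototypeTransitions {
  std::vector<std::pair<Proto*, MapLike*> > entries;

  // Mirrors GetPrototypeTransition: NULL-equivalent when not cached.
  MapLike* Get(Proto* prototype) const {
    for (size_t i = 0; i < entries.size(); ++i) {
      if (entries[i].first == prototype) return entries[i].second;
    }
    return nullptr;
  }

  // Mirrors PutPrototypeTransition (capacity limit omitted).
  void Put(Proto* prototype, MapLike* map) {
    entries.push_back(std::make_pair(prototype, map));
  }
};

int main() {
  PrototypeTransitions cache;
  Proto p;
  MapLike m1, m2;
  assert(cache.Get(&p) == nullptr);
  cache.Put(&p, &m1);             // transition for a concrete prototype
  cache.Put(kHoleSentinel, &m2);  // the used_for_prototype variant
  assert(cache.Get(&p) == &m1);   // same prototype -> same map, no blowup
  assert(cache.Get(kHoleSentinel) == &m2);
  return 0;
}
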
@@ -4975,18 +5033,13 @@
// Bit positions for bit field 3
static const int kIsShared = 0;
static const int kFunctionWithPrototype = 1;
-
- // Layout of the default cache. It holds alternating name and code objects.
- static const int kCodeCacheEntrySize = 2;
- static const int kCodeCacheEntryNameOffset = 0;
- static const int kCodeCacheEntryCodeOffset = 1;
+ static const int kUsedForPrototype = 2;
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset,
kSize> BodyDescriptor;
private:
- String* elements_transition_sentinel_name();
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -5359,8 +5412,8 @@
// A counter used to determine when to stress the deoptimizer with a
// deopt.
- inline int deopt_counter();
- inline void set_deopt_counter(int counter);
+ inline int stress_deopt_counter();
+ inline void set_stress_deopt_counter(int counter);
inline int profiler_ticks();
@@ -5488,9 +5541,26 @@
bool HasSourceCode();
Handle<Object> GetSourceCode();
+ // Number of times the function was optimized.
inline int opt_count();
inline void set_opt_count(int opt_count);
+ // Number of times the function was deoptimized.
+ inline void set_deopt_count(int value);
+ inline int deopt_count();
+ inline void increment_deopt_count();
+
+ // Number of times we have tried to re-enable optimization after it
+ // was disabled due to a high number of deoptimizations.
+ inline void set_opt_reenable_tries(int value);
+ inline int opt_reenable_tries();
+
+ inline void TryReenableOptimization();
+
+ // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
+ inline void set_counters(int value);
+ inline int counters();
+
// Source size of this function.
int SourceSize();
@@ -5547,13 +5617,14 @@
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
- // ic_age is a Smi field. It could be grouped with another Smi field into a
- // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
- static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
+ // ast_node_count is a Smi field. It could be grouped with another Smi field
+ // into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
+ static const int kAstNodeCountOffset =
+ kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kICAgeOffset + kPointerSize;
+ kAstNodeCountOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5571,12 +5642,11 @@
kCompilerHintsOffset + kPointerSize;
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kPointerSize;
- static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
- static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
-
+ static const int kCountersOffset = kOptCountOffset + kPointerSize;
+ static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
// Total size.
- static const int kSize = kDeoptCounterOffset + kPointerSize;
+ static const int kSize = kStressDeoptCounterOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -5588,7 +5658,7 @@
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kICAgeOffset + kPointerSize;
+ kAstNodeCountOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -5612,11 +5682,11 @@
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kIntSize;
- static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
- static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
+ static const int kCountersOffset = kOptCountOffset + kIntSize;
+ static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
// Total size.
- static const int kSize = kDeoptCounterOffset + kIntSize;
+ static const int kSize = kStressDeoptCounterOffset + kIntSize;
#endif
@@ -5669,6 +5739,10 @@
kCompilerHintsCount // Pseudo entry
};
+ class DeoptCountBits: public BitField<int, 0, 4> {};
+ class OptReenableTriesBits: public BitField<int, 4, 18> {};
+ class ICAgeBits: public BitField<int, 22, 8> {};
+
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
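
The three classes above pack deopt_count, opt_reenable_tries, and ic_age into a single integer using V8's shift-and-mask BitField pattern (4 + 18 + 8 = 30 bits, which fits a Smi). A minimal standalone version of the helper with the same layout:

#include <cassert>

// Minimal version of V8's BitField<T, shift, size> helper.
template <typename T, int shift, int size>
struct BitField {
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static unsigned update(unsigned field, T value) {
    return (field & ~kMask) | encode(value);
  }
};

// Same layout as the classes above.
typedef BitField<int, 0, 4> DeoptCountBits;
typedef BitField<int, 4, 18> OptReenableTriesBits;
typedef BitField<int, 22, 8> ICAgeBits;

int main() {
  unsigned counters = 0;
  counters = DeoptCountBits::update(counters, 9);
  counters = OptReenableTriesBits::update(counters, 1234);
  counters = ICAgeBits::update(counters, 77);
  assert(DeoptCountBits::decode(counters) == 9);
  assert(OptReenableTriesBits::decode(counters) == 1234);
  assert(ICAgeBits::decode(counters) == 77);
  return 0;
}
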
@@ -7745,15 +7819,17 @@
Object* value,
StrictModeFlag strict_mode);
- // If the handler defines an accessor property, invoke its setter
- // (or throw if only a getter exists) and set *found to true. Otherwise false.
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
+ // If the handler defines an accessor property with a setter, invoke it.
+ // If it defines an accessor property without a setter, or a data property
+ // that is read-only, throw. In all these cases set '*done' to true,
+ // otherwise set it to false.
+ MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
JSReceiver* receiver,
String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- bool* found);
+ bool* done);
MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
String* name,
@@ -8080,6 +8156,7 @@
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
+ DECL_ACCESSORS(expected_receiver_type, Object)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -8093,6 +8170,9 @@
inline PropertyAttributes property_attributes();
inline void set_property_attributes(PropertyAttributes attributes);
+ // Checks whether the given receiver is compatible with this accessor.
+ inline bool IsCompatibleReceiver(Object* receiver);
+
static inline AccessorInfo* cast(Object* obj);
#ifdef OBJECT_PRINT
@@ -8110,7 +8190,8 @@
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kNameOffset = kDataOffset + kPointerSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
private:
// Bit positions in flag.
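The expected_receiver_type field and IsCompatibleReceiver() are the object-model half of the AccessorSignature support this patch adds on the API side. A minimal embedder-side sketch, assuming the 3.11-era v8.h declarations with the new trailing signature parameter (template names and the getter body are illustrative):

    #include <v8.h>
    using namespace v8;

    // With a signature attached, the callback only runs on receivers
    // created from (or inheriting) the matching template.
    static Handle<Value> XGetter(Local<String> property,
                                 const AccessorInfo& info) {
      return Integer::New(42);
    }

    void InstallAccessor() {
      HandleScope scope;
      Handle<FunctionTemplate> templ = FunctionTemplate::New();
      Handle<AccessorSignature> signature = AccessorSignature::New(templ);
      templ->InstanceTemplate()->SetAccessor(
          String::New("x"), XGetter, 0 /* no setter */, Handle<Value>(),
          DEFAULT, None, signature);
    }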
diff --git a/src/parser.cc b/src/parser.cc
index 778527f..7c51b69 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -103,7 +103,7 @@
if (characters_ != NULL) {
RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
characters_ = NULL;
- text_.Add(atom);
+ text_.Add(atom, zone());
LAST(ADD_ATOM);
}
}
@@ -115,12 +115,12 @@
if (num_text == 0) {
return;
} else if (num_text == 1) {
- terms_.Add(text_.last());
+ terms_.Add(text_.last(), zone());
} else {
- RegExpText* text = new(zone()) RegExpText();
+ RegExpText* text = new(zone()) RegExpText(zone());
for (int i = 0; i < num_text; i++)
- text_.Get(i)->AppendToText(text);
- terms_.Add(text);
+ text_.Get(i)->AppendToText(text, zone());
+ terms_.Add(text, zone());
}
text_.Clear();
}
@@ -129,9 +129,9 @@
void RegExpBuilder::AddCharacter(uc16 c) {
pending_empty_ = false;
if (characters_ == NULL) {
- characters_ = new(zone()) ZoneList<uc16>(4);
+ characters_ = new(zone()) ZoneList<uc16>(4, zone());
}
- characters_->Add(c);
+ characters_->Add(c, zone());
LAST(ADD_CHAR);
}
@@ -148,10 +148,10 @@
}
if (term->IsTextElement()) {
FlushCharacters();
- text_.Add(term);
+ text_.Add(term, zone());
} else {
FlushText();
- terms_.Add(term);
+ terms_.Add(term, zone());
}
LAST(ADD_ATOM);
}
@@ -159,7 +159,7 @@
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
- terms_.Add(assert);
+ terms_.Add(assert, zone());
LAST(ADD_ASSERT);
}
@@ -178,9 +178,9 @@
} else if (num_terms == 1) {
alternative = terms_.last();
} else {
- alternative = new(zone()) RegExpAlternative(terms_.GetList());
+ alternative = new(zone()) RegExpAlternative(terms_.GetList(zone()));
}
- alternatives_.Add(alternative);
+ alternatives_.Add(alternative, zone());
terms_.Clear();
LAST(ADD_NONE);
}
@@ -195,7 +195,7 @@
if (num_alternatives == 1) {
return alternatives_.last();
}
- return new(zone()) RegExpDisjunction(alternatives_.GetList());
+ return new(zone()) RegExpDisjunction(alternatives_.GetList(zone()));
}
@@ -214,7 +214,7 @@
int num_chars = char_vector.length();
if (num_chars > 1) {
Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new(zone()) RegExpAtom(prefix));
+ text_.Add(new(zone()) RegExpAtom(prefix), zone());
char_vector = char_vector.SubVector(num_chars - 1, num_chars);
}
characters_ = NULL;
@@ -233,7 +233,7 @@
if (min == 0) {
return;
}
- terms_.Add(atom);
+ terms_.Add(atom, zone());
return;
}
} else {
@@ -241,7 +241,7 @@
UNREACHABLE();
return;
}
- terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom));
+ terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom), zone());
LAST(ADD_TERM);
}
@@ -270,7 +270,7 @@
if (symbol_cache_.length() <= symbol_id) {
// Increase length to index + 1.
symbol_cache_.AddBlock(Handle<String>::null(),
- symbol_id + 1 - symbol_cache_.length());
+ symbol_id + 1 - symbol_cache_.length(), zone());
}
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
@@ -408,7 +408,7 @@
Scope* Parser::NewScope(Scope* parent, ScopeType type) {
- Scope* result = new(zone()) Scope(parent, type);
+ Scope* result = new(zone()) Scope(parent, type, zone());
result->Initialize();
return result;
}
@@ -538,7 +538,7 @@
ScriptDataImpl* pre_data,
Zone* zone)
: isolate_(script->GetIsolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
+ symbol_cache_(pre_data ? pre_data->symbol_count() : 0, zone),
script_(script),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
@@ -570,7 +570,7 @@
HistogramTimerScope timer(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- fni_ = new(zone()) FuncNameInferrer(isolate());
+ fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
source->TryFlatten();
@@ -609,7 +609,8 @@
if (info->is_eval()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
if (!info->is_global() && (shared.is_null() || shared->is_function())) {
- scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
+ scope = Scope::DeserializeScopeChain(*info->calling_context(), scope,
+ zone());
}
if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
scope = NewScope(scope, EVAL_SCOPE);
@@ -619,7 +620,7 @@
scope->set_end_position(source->length());
FunctionState function_state(this, scope, isolate());
top_scope_->SetLanguageMode(info->language_mode());
- ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
+ ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_loc = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), &ok);
@@ -696,7 +697,7 @@
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
- fni_ = new(zone()) FuncNameInferrer(isolate());
+ fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
fni_->PushEnclosingName(name);
mode_ = PARSE_EAGERLY;
@@ -709,7 +710,8 @@
Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
if (!info->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info->closure()->context(), scope);
+ scope = Scope::DeserializeScopeChain(info->closure()->context(), scope,
+ zone());
}
FunctionState function_state(this, scope, isolate());
ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
@@ -944,12 +946,13 @@
// function contains only assignments of this type.
class ThisNamedPropertyAssignmentFinder : public ParserFinder {
public:
- explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
+ ThisNamedPropertyAssignmentFinder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
only_simple_this_property_assignments_(true),
- names_(0),
- assigned_arguments_(0),
- assigned_constants_(0) {
+ names_(0, zone),
+ assigned_arguments_(0, zone),
+ assigned_constants_(0, zone),
+ zone_(zone) {
}
void Update(Scope* scope, Statement* stat) {
@@ -1058,9 +1061,9 @@
return;
}
}
- names_.Add(name);
- assigned_arguments_.Add(index);
- assigned_constants_.Add(isolate_->factory()->undefined_value());
+ names_.Add(name, zone());
+ assigned_arguments_.Add(index, zone());
+ assigned_constants_.Add(isolate_->factory()->undefined_value(), zone());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
@@ -1072,9 +1075,9 @@
return;
}
}
- names_.Add(name);
- assigned_arguments_.Add(-1);
- assigned_constants_.Add(value);
+ names_.Add(name, zone());
+ assigned_arguments_.Add(-1, zone());
+ assigned_constants_.Add(value, zone());
}
void AssignmentFromSomethingElse() {
@@ -1086,17 +1089,20 @@
if (names_.capacity() == 0) {
ASSERT(assigned_arguments_.capacity() == 0);
ASSERT(assigned_constants_.capacity() == 0);
- names_.Initialize(4);
- assigned_arguments_.Initialize(4);
- assigned_constants_.Initialize(4);
+ names_.Initialize(4, zone());
+ assigned_arguments_.Initialize(4, zone());
+ assigned_constants_.Initialize(4, zone());
}
}
+ Zone* zone() const { return zone_; }
+
Isolate* isolate_;
bool only_simple_this_property_assignments_;
ZoneStringList names_;
ZoneList<int> assigned_arguments_;
ZoneObjectList assigned_constants_;
+ Zone* zone_;
};
@@ -1115,7 +1121,8 @@
ASSERT(processor != NULL);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
- ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
+ ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate(),
+ zone());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1173,7 +1180,7 @@
if (top_scope_->is_function_scope()) {
this_property_assignment_finder.Update(top_scope_, stat);
}
- processor->Add(stat);
+ processor->Add(stat, zone());
}
// Propagate the collected information on this property assignments.
@@ -1244,7 +1251,7 @@
// 'module' Identifier Module
// Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, zone());
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
@@ -1270,7 +1277,7 @@
// TODO(rossberg): Add initialization statement to block.
- if (names) names->Add(name);
+ if (names) names->Add(name, zone());
return block;
}
@@ -1307,7 +1314,7 @@
// '{' ModuleElement '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(NULL, 16, false);
+ Block* body = factory()->NewBlock(NULL, 16, false, zone());
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
@@ -1319,7 +1326,7 @@
{
BlockState block_state(this, scope);
- TargetCollector collector;
+ TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -1364,7 +1371,7 @@
PrintF("# Path .%s ", name->ToAsciiArray());
#endif
Module* member = factory()->NewModulePath(result, name);
- result->interface()->Add(name, member->interface(), ok);
+ result->interface()->Add(name, member->interface(), zone(), ok);
if (!*ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1395,7 +1402,8 @@
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
VariableProxy* proxy = top_scope_->NewUnresolved(
- factory(), name, scanner().location().beg_pos, Interface::NewModule());
+ factory(), name, scanner().location().beg_pos,
+ Interface::NewModule(zone()));
return factory()->NewModuleVariable(proxy);
}
@@ -1444,14 +1452,14 @@
// TODO(ES6): implement destructuring ImportSpecifiers
Expect(Token::IMPORT, CHECK_OK);
- ZoneStringList names(1);
+ ZoneStringList names(1, zone());
Handle<String> name = ParseIdentifierName(CHECK_OK);
- names.Add(name);
+ names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
name = ParseIdentifierName(CHECK_OK);
- names.Add(name);
+ names.Add(name, zone());
}
ExpectContextualKeyword("from", CHECK_OK);
@@ -1460,14 +1468,14 @@
// Generate a separate declaration for each identifier.
// TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, zone());
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Import %s ", names[i]->ToAsciiArray());
#endif
- Interface* interface = Interface::NewUnknown();
- module->interface()->Add(names[i], interface, ok);
+ Interface* interface = Interface::NewUnknown(zone());
+ module->interface()->Add(names[i], interface, zone(), ok);
if (!*ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1502,17 +1510,17 @@
Expect(Token::EXPORT, CHECK_OK);
Statement* result = NULL;
- ZoneStringList names(1);
+ ZoneStringList names(1, zone());
switch (peek()) {
case Token::IDENTIFIER: {
Handle<String> name = ParseIdentifier(CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (!name->IsEqualTo(CStrVector("module"))) {
- names.Add(name);
+ names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
name = ParseIdentifier(CHECK_OK);
- names.Add(name);
+ names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
result = factory()->NewEmptyStatement();
@@ -1545,8 +1553,10 @@
if (FLAG_print_interface_details)
PrintF("# Export %s ", names[i]->ToAsciiArray());
#endif
- Interface* inner = Interface::NewUnknown();
- interface->Add(names[i], inner, CHECK_OK);
+ Interface* inner = Interface::NewUnknown(zone());
+ interface->Add(names[i], inner, zone(), CHECK_OK);
+ if (!*ok)
+ return NULL;
VariableProxy* proxy = NewUnresolved(names[i], LET, inner);
USE(proxy);
// TODO(rossberg): Rethink whether we actually need to store export
@@ -1673,7 +1683,7 @@
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = factory()->NewBlock(labels, 1, false);
+ Block* result = factory()->NewBlock(labels, 1, false, zone());
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
if (statement) {
@@ -1872,7 +1882,7 @@
if (FLAG_print_interface_details)
PrintF("# Declare %s\n", var->name()->ToAsciiArray());
#endif
- proxy->interface()->Unify(var->interface(), &ok);
+ proxy->interface()->Unify(var->interface(), zone(), &ok);
if (!ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1971,7 +1981,7 @@
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
Declare(declaration, true, CHECK_OK);
- if (names) names->Add(name);
+ if (names) names->Add(name, zone());
return factory()->NewEmptyStatement();
}
@@ -1986,7 +1996,7 @@
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = factory()->NewBlock(labels, 16, false);
+ Block* result = factory()->NewBlock(labels, 16, false, zone());
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -2009,14 +2019,14 @@
// '{' BlockElement* '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false);
+ Block* body = factory()->NewBlock(labels, 16, false, zone());
Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
block_scope->set_start_position(scanner().location().beg_pos);
{ BlockState block_state(this, block_scope);
- TargetCollector collector;
+ TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -2166,7 +2176,7 @@
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, zone());
int nvars = 0; // the number of variables declared
Handle<String> name;
do {
@@ -2210,7 +2220,7 @@
*ok = false;
return NULL;
}
- if (names) names->Add(name);
+ if (names) names->Add(name, zone());
// Parse initialization expression if present and/or needed. A
// declaration of the form:
@@ -2289,13 +2299,14 @@
// properties defined in prototype objects.
if (initialization_scope->is_global_scope()) {
// Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
+ ZoneList<Expression*>* arguments =
+ new(zone()) ZoneList<Expression*>(3, zone());
// We have at least 1 parameter.
- arguments->Add(factory()->NewLiteral(name));
+ arguments->Add(factory()->NewLiteral(name), zone());
CallRuntime* initialize;
if (is_const) {
- arguments->Add(value);
+ arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
// Construct the call to Runtime_InitializeConstGlobal
@@ -2310,14 +2321,14 @@
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode));
+ arguments->Add(factory()->NewNumberLiteral(language_mode), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
// necessarily be stored in the global object in that case,
// which is why we need to generate a separate assignment node.
if (value != NULL && !inside_with()) {
- arguments->Add(value);
+ arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
}
@@ -2417,8 +2428,10 @@
*ok = false;
return NULL;
}
- if (labels == NULL) labels = new(zone()) ZoneStringList(4);
- labels->Add(label);
+ if (labels == NULL) {
+ labels = new(zone()) ZoneStringList(4, zone());
+ }
+ labels->Add(label, zone());
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
@@ -2630,12 +2643,13 @@
}
Expect(Token::COLON, CHECK_OK);
int pos = scanner().location().beg_pos;
- ZoneList<Statement*>* statements = new(zone()) ZoneList<Statement*>(5);
+ ZoneList<Statement*>* statements =
+ new(zone()) ZoneList<Statement*>(5, zone());
while (peek() != Token::CASE &&
peek() != Token::DEFAULT &&
peek() != Token::RBRACE) {
Statement* stat = ParseStatement(NULL, CHECK_OK);
- statements->Add(stat);
+ statements->Add(stat, zone());
}
return new(zone()) CaseClause(isolate(), label, statements, pos);
@@ -2656,11 +2670,11 @@
Expect(Token::RPAREN, CHECK_OK);
bool default_seen = false;
- ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4);
+ ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4, zone());
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
- cases->Add(clause);
+ cases->Add(clause, zone());
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2701,7 +2715,7 @@
Expect(Token::TRY, CHECK_OK);
- TargetCollector try_collector;
+ TargetCollector try_collector(zone());
Block* try_block;
{ Target target(&this->target_stack_, &try_collector);
@@ -2719,7 +2733,7 @@
// then we will need to collect escaping targets from the catch
// block. Since we don't know yet if there will be a finally block, we
// always collect the targets.
- TargetCollector catch_collector;
+ TargetCollector catch_collector(zone());
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
@@ -2773,7 +2787,7 @@
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block);
statement->set_escaping_targets(try_collector.targets());
- try_block = factory()->NewBlock(NULL, 1, false);
+ try_block = factory()->NewBlock(NULL, 1, false, zone());
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -2790,7 +2804,7 @@
int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
// Combine the jump targets of the try block and the possible catch block.
- try_collector.targets()->AddAll(*catch_collector.targets());
+ try_collector.targets()->AddAll(*catch_collector.targets(), zone());
}
result->set_escaping_targets(try_collector.targets());
@@ -2879,7 +2893,7 @@
Statement* body = ParseStatement(NULL, CHECK_OK);
loop->Initialize(each, enumerable, body);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result = factory()->NewBlock(NULL, 2, false, zone());
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
top_scope_ = saved_scope;
@@ -2925,7 +2939,7 @@
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- Block* body_block = factory()->NewBlock(NULL, 3, false);
+ Block* body_block = factory()->NewBlock(NULL, 3, false, zone());
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
Statement* assignment_statement =
@@ -3014,7 +3028,7 @@
// for (; c; n) b
// }
ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result = factory()->NewBlock(NULL, 2, false, zone());
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
@@ -3459,7 +3473,7 @@
if (!stack->is_empty()) {
int last = stack->pop();
result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0), last);
+ result, new(zone()) ZoneList<Expression*>(0, zone()), last);
}
return result;
}
@@ -3649,7 +3663,7 @@
if (FLAG_print_interface_details)
PrintF("# Variable %s ", name->ToAsciiArray());
#endif
- Interface* interface = Interface::NewUnknown();
+ Interface* interface = Interface::NewUnknown(zone());
result = top_scope_->NewUnresolved(
factory(), name, scanner().location().beg_pos, interface);
break;
@@ -3749,7 +3763,7 @@
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
- ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4);
+ ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
Expression* elem;
@@ -3758,7 +3772,7 @@
} else {
elem = ParseAssignmentExpression(true, CHECK_OK);
}
- values->Add(elem);
+ values->Add(elem, zone());
if (peek() != Token::RBRACK) {
Expect(Token::COMMA, CHECK_OK);
}
@@ -4127,7 +4141,7 @@
// )*[','] '}'
ZoneList<ObjectLiteral::Property*>* properties =
- new(zone()) ZoneList<ObjectLiteral::Property*>(4);
+ new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
int number_of_boilerplate_properties = 0;
bool has_function = false;
@@ -4164,7 +4178,7 @@
}
// Validate the property.
checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property);
+ properties->Add(property, zone());
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
if (fni_ != NULL) {
@@ -4232,7 +4246,7 @@
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
// Validate the property
checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property);
+ properties->Add(property, zone());
// TODO(1240767): Consider allowing trailing comma.
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
@@ -4291,12 +4305,12 @@
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4);
+ ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LPAREN, CHECK_OK);
bool done = (peek() == Token::RPAREN);
while (!done) {
Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument);
+ result->Add(argument, zone());
if (result->length() > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_arguments",
Vector<const char*>::empty());
@@ -4585,7 +4599,7 @@
}
if (!is_lazily_compiled) {
- body = new(zone()) ZoneList<Statement*>(8);
+ body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
VariableProxy* fproxy =
top_scope_->NewUnresolved(factory(), function_name);
@@ -4594,7 +4608,8 @@
factory()->NewAssignment(fvar_init_op,
fproxy,
factory()->NewThisFunction(),
- RelocInfo::kNoPosition)));
+ RelocInfo::kNoPosition)),
+ zone());
}
ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
@@ -4993,7 +5008,7 @@
// the break target to any TargetCollectors passed on the stack.
for (Target* t = target_stack_; t != stop; t = t->previous()) {
TargetCollector* collector = t->node()->AsTargetCollector();
- if (collector != NULL) collector->AddTarget(target);
+ if (collector != NULL) collector->AddTarget(target, zone());
}
}
@@ -5040,9 +5055,9 @@
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
elements, FAST_ELEMENTS, TENURED);
- ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
- args->Add(factory()->NewLiteral(type));
- args->Add(factory()->NewLiteral(array));
+ ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewLiteral(type), zone());
+ args->Add(factory()->NewLiteral(array), zone());
CallRuntime* call_constructor =
factory()->NewCallRuntime(constructor, NULL, args);
return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
@@ -5236,8 +5251,8 @@
Advance();
// everything except \x0a, \x0d, \u2028 and \u2029
ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape('.', ranges);
+ new(zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape('.', ranges, zone());
RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
builder->AddAtom(atom);
break;
@@ -5263,12 +5278,12 @@
Advance(2);
} else {
if (captures_ == NULL) {
- captures_ = new(zone()) ZoneList<RegExpCapture*>(2);
+ captures_ = new(zone()) ZoneList<RegExpCapture*>(2, zone());
}
if (captures_started() >= kMaxCaptures) {
ReportError(CStrVector("Too many captures") CHECK_FAILED);
}
- captures_->Add(NULL);
+ captures_->Add(NULL, zone());
}
// Store current state and begin new disjunction parsing.
stored_state = new(zone()) RegExpParserState(stored_state, type,
@@ -5306,8 +5321,8 @@
uc32 c = Next();
Advance(2);
ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape(c, ranges);
+ new(zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape(c, ranges, zone());
RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
builder->AddAtom(atom);
break;
@@ -5782,11 +5797,12 @@
// escape (i.e., 's' means whitespace, from '\s').
static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
uc16 char_class,
- CharacterRange range) {
+ CharacterRange range,
+ Zone* zone) {
if (char_class != kNoCharClass) {
- CharacterRange::AddClassEscape(char_class, ranges);
+ CharacterRange::AddClassEscape(char_class, ranges, zone);
} else {
- ranges->Add(range);
+ ranges->Add(range, zone);
}
}
@@ -5802,7 +5818,8 @@
is_negated = true;
Advance();
}
- ZoneList<CharacterRange>* ranges = new(zone()) ZoneList<CharacterRange>(2);
+ ZoneList<CharacterRange>* ranges =
+ new(zone()) ZoneList<CharacterRange>(2, zone());
while (has_more() && current() != ']') {
uc16 char_class = kNoCharClass;
CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
@@ -5813,25 +5830,25 @@
// following code report an error.
break;
} else if (current() == ']') {
- AddRangeOrEscape(ranges, char_class, first);
- ranges->Add(CharacterRange::Singleton('-'));
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
break;
}
uc16 char_class_2 = kNoCharClass;
CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
// Either end is an escaped character class. Treat the '-' verbatim.
- AddRangeOrEscape(ranges, char_class, first);
- ranges->Add(CharacterRange::Singleton('-'));
- AddRangeOrEscape(ranges, char_class_2, next);
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
+ AddRangeOrEscape(ranges, char_class_2, next, zone());
continue;
}
if (first.from() > next.to()) {
return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
}
- ranges->Add(CharacterRange::Range(first.from(), next.to()));
+ ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
} else {
- AddRangeOrEscape(ranges, char_class, first);
+ AddRangeOrEscape(ranges, char_class, first, zone());
}
}
if (!has_more()) {
@@ -5839,7 +5856,7 @@
}
Advance();
if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything());
+ ranges->Add(CharacterRange::Everything(), zone());
is_negated = !is_negated;
}
return new(zone()) RegExpCharacterClass(ranges, is_negated);
diff --git a/src/parser.h b/src/parser.h
index 2a79b63..773d59a 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -200,12 +200,12 @@
// Adds element at end of list. This element is buffered and can
// be read using last() or removed using RemoveLast until a new Add or until
// RemoveLast or GetList has been called.
- void Add(T* value) {
+ void Add(T* value, Zone* zone) {
if (last_ != NULL) {
if (list_ == NULL) {
- list_ = new ZoneList<T*>(initial_size);
+ list_ = new(zone) ZoneList<T*>(initial_size, zone);
}
- list_->Add(last_);
+ list_->Add(last_, zone);
}
last_ = value;
}
@@ -250,12 +250,12 @@
return length + ((last_ == NULL) ? 0 : 1);
}
- ZoneList<T*>* GetList() {
+ ZoneList<T*>* GetList(Zone* zone) {
if (list_ == NULL) {
- list_ = new ZoneList<T*>(initial_size);
+ list_ = new(zone) ZoneList<T*>(initial_size, zone);
}
if (last_ != NULL) {
- list_->Add(last_);
+ list_->Add(last_, zone);
last_ = NULL;
}
return list_;
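Most of the churn in parser.cc above is this same mechanical change: ZoneList constructors, Add(), Initialize(), and AddBlock() now take the Zone* explicitly instead of the list reaching for a shared zone itself. A self-contained toy showing the shape of that pattern; Zone and ZoneList here are simplified stand-ins, not the real classes:

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // Toy bump allocator: everything it hands out dies with the zone.
    class Zone {
     public:
      explicit Zone(size_t capacity)
          : buffer_(static_cast<char*>(malloc(capacity))), used_(0) {}
      ~Zone() { free(buffer_); }
      void* Allocate(size_t size) {  // No overflow handling in this toy.
        void* result = buffer_ + used_;
        used_ += size;
        return result;
      }
     private:
      char* buffer_;
      size_t used_;
    };

    template <typename T>
    class ZoneList {
     public:
      ZoneList(int capacity, Zone* zone) : length_(0), capacity_(capacity) {
        data_ = static_cast<T*>(zone->Allocate(capacity * sizeof(T)));
      }
      // The zone is passed at each growth point rather than stored,
      // mirroring the patch.
      void Add(const T& value, Zone* zone) {
        if (length_ == capacity_) Grow(zone);
        data_[length_++] = value;
      }
     private:
      void Grow(Zone* zone) {
        capacity_ *= 2;
        T* new_data =
            static_cast<T*>(zone->Allocate(capacity_ * sizeof(T)));
        memcpy(new_data, data_, length_ * sizeof(T));
        data_ = new_data;  // Old block is reclaimed with the whole zone.
      }
      T* data_;
      int length_;
      int capacity_;
    };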
@@ -285,7 +285,7 @@
void FlushCharacters();
void FlushText();
void FlushTerms();
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
Zone* zone_;
bool pending_empty_;
@@ -371,7 +371,7 @@
int disjunction_capture_index,
Zone* zone)
: previous_state_(previous_state),
- builder_(new RegExpBuilder(zone)),
+ builder_(new(zone) RegExpBuilder(zone)),
group_type_(group_type),
disjunction_capture_index_(disjunction_capture_index) {}
// Parser state of containing expression, if any.
@@ -398,7 +398,7 @@
};
Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
+ Zone* zone() const { return isolate_->zone(); }
uc32 current() { return current_; }
bool has_more() { return has_more_; }
@@ -548,7 +548,7 @@
ZoneScope* zone_scope);
Isolate* isolate() { return isolate_; }
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(CompilationInfo* info,
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 69ef082..ca19f4a 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -169,6 +169,15 @@
}
+size_t StringsStorage::GetUsedMemorySize() const {
+ size_t size = sizeof(*this);
+ size += sizeof(HashMap::Entry) * names_.capacity();
+ for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
+ }
+ return size;
+}
+
const char* const CodeEntry::kEmptyNamePrefix = "";
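The per-structure GetUsedMemorySize() methods added in this file feed the profiler's self-reported total. A minimal embedder sketch of querying it, assuming the static v8::HeapProfiler::GetMemorySizeUsedByProfiler() declaration this patch introduces:

    #include <v8-profiler.h>
    #include <cstdio>

    void ReportProfilerOverhead() {
      // Totals sizeof(*this), hash map entries, string payloads, and
      // retained snapshots across the profiler's bookkeeping structures.
      size_t bytes = v8::HeapProfiler::GetMemorySizeUsedByProfiler();
      std::printf("heap profiler overhead: %lu bytes\n",
                  static_cast<unsigned long>(bytes));
    }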
@@ -1083,12 +1092,16 @@
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapSnapshotsCollectionSize = 96;
+ static const int kExpectedHeapSnapshotSize = 136;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapSnapshotsCollectionSize = 144;
+ static const int kExpectedHeapSnapshotSize = 168;
static const uint64_t kMaxSerializableSnapshotRawSize =
static_cast<uint64_t>(6000) * MB;
};
@@ -1243,12 +1256,15 @@
template<typename T, class P>
static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.capacity() * sizeof(T);
+ return list.length() * sizeof(T) + sizeof(list);
}
size_t HeapSnapshot::RawSnapshotSize() const {
+ STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
+ sizeof(HeapSnapshot)); // NOLINT
return
+ sizeof(*this) +
GetMemoryUsedByList(entries_) +
GetMemoryUsedByList(edges_) +
GetMemoryUsedByList(children_) +
@@ -1446,6 +1462,15 @@
}
+size_t HeapObjectsMap::GetUsedMemorySize() const {
+ return
+ sizeof(*this) +
+ sizeof(HashMap::Entry) * entries_map_.capacity() +
+ GetMemoryUsedByList(entries_) +
+ GetMemoryUsedByList(time_intervals_);
+}
+
+
HeapSnapshotsCollection::HeapSnapshotsCollection()
: is_tracking_objects_(false),
snapshots_uids_(HeapSnapshotsMatch),
@@ -1525,6 +1550,22 @@
}
+size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
+ STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
+ kExpectedHeapSnapshotsCollectionSize ==
+ sizeof(HeapSnapshotsCollection)); // NOLINT
+ size_t size = sizeof(*this);
+ size += names_.GetUsedMemorySize();
+ size += ids_.GetUsedMemorySize();
+ size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
+ size += GetMemoryUsedByList(snapshots_);
+ for (int i = 0; i < snapshots_.length(); ++i) {
+ size += snapshots_[i]->RawSnapshotSize();
+ }
+ return size;
+}
+
+
HeapEntriesMap::HeapEntriesMap()
: entries_(HeapThingsMatch) {
}
@@ -2178,7 +2219,6 @@
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
case MAP_TRANSITION: // we do not care about transitions here...
- case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR: // ... and not about "holes"
break;
@@ -3248,7 +3288,6 @@
void HeapSnapshotJSONSerializer::SerializeImpl() {
- List<HeapEntry>& nodes = snapshot_->entries();
ASSERT(0 == snapshot_->root()->index());
writer_->AddCharacter('{');
writer_->AddString("\"snapshot\":{");
@@ -3256,11 +3295,11 @@
if (writer_->aborted()) return;
writer_->AddString("},\n");
writer_->AddString("\"nodes\":[");
- SerializeNodes(nodes);
+ SerializeNodes();
if (writer_->aborted()) return;
writer_->AddString("],\n");
writer_->AddString("\"edges\":[");
- SerializeEdges(nodes);
+ SerializeEdges();
if (writer_->aborted()) return;
writer_->AddString("],\n");
writer_->AddString("\"strings\":[");
@@ -3302,9 +3341,9 @@
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
- // The buffer needs space for 3 ints, 3 commas and \0
+ // The buffer needs space for 3 unsigned ints, 3 commas and \0
static const int kBufferSize =
- MaxDecimalDigitsIn<sizeof(int)>::kSigned * 3 + 3 + 1; // NOLINT
+ MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 1; // NOLINT
EmbeddedVector<char, kBufferSize> buffer;
int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
|| edge->type() == HeapGraphEdge::kHidden
@@ -3324,25 +3363,21 @@
}
-void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry>& nodes) {
- bool first_edge = true;
- for (int i = 0; i < nodes.length(); ++i) {
- HeapEntry* entry = &nodes[i];
- Vector<HeapGraphEdge*> children = entry->children();
- for (int j = 0; j < children.length(); ++j) {
- SerializeEdge(children[j], first_edge);
- first_edge = false;
- if (writer_->aborted()) return;
- }
+void HeapSnapshotJSONSerializer::SerializeEdges() {
+ List<HeapGraphEdge*>& edges = snapshot_->children();
+ for (int i = 0; i < edges.length(); ++i) {
+ ASSERT(i == 0 ||
+ edges[i - 1]->from()->index() <= edges[i]->from()->index());
+ SerializeEdge(edges[i], i == 0);
+ if (writer_->aborted()) return;
}
}
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry,
- int edges_index) {
- // The buffer needs space for 5 uint32_t, 5 commas, \n and \0
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+ // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
static const int kBufferSize =
- 5 * MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned // NOLINT
+ 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 5 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
@@ -3357,19 +3392,17 @@
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edges_index, buffer, buffer_pos);
+ buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
-void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) {
- int edges_index = 0;
- for (int i = 0; i < nodes.length(); ++i) {
- HeapEntry* entry = &nodes[i];
- SerializeNode(entry, edges_index);
- edges_index += entry->children().length() * kEdgeFieldsCount;
+void HeapSnapshotJSONSerializer::SerializeNodes() {
+ List<HeapEntry>& entries = snapshot_->entries();
+ for (int i = 0; i < entries.length(); ++i) {
+ SerializeNode(&entries[i]);
if (writer_->aborted()) return;
}
}
@@ -3393,7 +3426,7 @@
JSON_S("name") ","
JSON_S("id") ","
JSON_S("self_size") ","
- JSON_S("edges_index")) ","
+ JSON_S("edge_count")) ","
JSON_S("node_types") ":" JSON_A(
JSON_A(
JSON_S("hidden") ","
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 6388985..d56d874 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -72,6 +72,7 @@
const char* GetName(int index);
inline const char* GetFunctionName(String* name);
inline const char* GetFunctionName(const char* name);
+ size_t GetUsedMemorySize() const;
private:
static const int kMaxNameSize = 1024;
@@ -650,6 +651,7 @@
void StopHeapObjectsTracking();
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ size_t GetUsedMemorySize() const;
static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
static inline SnapshotObjectId GetNthGcSubrootId(int delta);
@@ -734,6 +736,7 @@
SnapshotObjectId last_assigned_id() const {
return ids_.last_assigned_id();
}
+ size_t GetUsedMemorySize() const;
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
@@ -1072,10 +1075,10 @@
int GetStringId(const char* s);
int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
- void SerializeEdges(const List<HeapEntry>& nodes);
+ void SerializeEdges();
void SerializeImpl();
- void SerializeNode(HeapEntry* entry, int edges_index);
- void SerializeNodes(const List<HeapEntry>& nodes);
+ void SerializeNode(HeapEntry* entry);
+ void SerializeNodes();
void SerializeSnapshot();
void SerializeString(const unsigned char* s);
void SerializeStrings();
diff --git a/src/property-details.h b/src/property-details.h
index c79aa96..a623fe9 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -63,9 +63,8 @@
INTERCEPTOR = 5, // only in lookup results, not in descriptors
// All properties before MAP_TRANSITION are real.
MAP_TRANSITION = 6, // only in fast mode
- ELEMENTS_TRANSITION = 7,
- CONSTANT_TRANSITION = 8, // only in fast mode
- NULL_DESCRIPTOR = 9, // only in fast mode
+ CONSTANT_TRANSITION = 7, // only in fast mode
+ NULL_DESCRIPTOR = 8, // only in fast mode
// There are no IC stubs for NULL_DESCRIPTORS. Therefore,
// NULL_DESCRIPTOR can be used as the type flag for IC stubs for
// nonexistent properties.
diff --git a/src/property.cc b/src/property.cc
index 78f237d..8c69541 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -61,12 +61,6 @@
GetTransitionMap()->Print(out);
PrintF(out, "\n");
break;
- case ELEMENTS_TRANSITION:
- PrintF(out, " -type = elements transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
- break;
case CONSTANT_FUNCTION:
PrintF(out, " -type = constant function\n");
PrintF(out, " -function:\n");
@@ -118,7 +112,6 @@
switch (details_.type()) {
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
return true;
case CALLBACKS: {
if (!value_->IsAccessorPair()) return false;
diff --git a/src/property.h b/src/property.h
index ba5e3c8..aa851f1 100644
--- a/src/property.h
+++ b/src/property.h
@@ -111,14 +111,6 @@
: Descriptor(key, map, attributes, MAP_TRANSITION) { }
};
-class ElementsTransitionDescriptor: public Descriptor {
- public:
- ElementsTransitionDescriptor(String* key,
- Object* map_or_array)
- : Descriptor(key, map_or_array, PropertyDetails(NONE,
- ELEMENTS_TRANSITION)) { }
-};
-
// Marks a field name in a map so that adding the field is guaranteed
// to create a FIELD descriptor in the new map. Used after adding
// a constant function the first time, creating a CONSTANT_FUNCTION
@@ -180,7 +172,6 @@
AccessorPair::cast(callback_object)->ContainsAccessor());
}
case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
return false;
@@ -311,7 +302,6 @@
Map* GetTransitionMap() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(type() == MAP_TRANSITION ||
- type() == ELEMENTS_TRANSITION ||
type() == CONSTANT_TRANSITION);
return Map::cast(GetValue());
}
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index e582359..f878e8c 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -35,6 +35,7 @@
RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler* assembler) :
+ RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
ASSERT(type < 5);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index fe98c8e..a4719b5 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -35,9 +35,10 @@
namespace v8 {
namespace internal {
-RegExpMacroAssembler::RegExpMacroAssembler()
+RegExpMacroAssembler::RegExpMacroAssembler(Zone* zone)
: slow_safe_compiler_(false),
- global_mode_(NOT_GLOBAL) {
+ global_mode_(NOT_GLOBAL),
+ zone_(zone) {
}
@@ -56,8 +57,8 @@
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-NativeRegExpMacroAssembler::NativeRegExpMacroAssembler()
- : RegExpMacroAssembler() {
+NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
+ : RegExpMacroAssembler(zone) {
}
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 7621d49..bcf3673 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -63,7 +63,7 @@
kCheckStackLimit = true
};
- RegExpMacroAssembler();
+ explicit RegExpMacroAssembler(Zone* zone);
virtual ~RegExpMacroAssembler();
// The maximal number of pushes between stack checks. Users must supply
// kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
@@ -189,9 +189,12 @@
return global_mode_ == GLOBAL;
}
+ Zone* zone() const { return zone_; }
+
private:
bool slow_safe_compiler_;
bool global_mode_;
+ Zone* zone_;
};
@@ -213,7 +216,7 @@
// capture positions.
enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
- NativeRegExpMacroAssembler();
+ explicit NativeRegExpMacroAssembler(Zone* zone);
virtual ~NativeRegExpMacroAssembler();
virtual bool CanReadUnaligned();
diff --git a/src/rewriter.cc b/src/rewriter.cc
index e58ddb4..3fcd603 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -262,7 +262,7 @@
Statement* result_statement =
processor.factory()->NewReturnStatement(result_proxy);
result_statement->set_statement_pos(position);
- body->Add(result_statement);
+ body->Add(result_statement, info->isolate()->zone());
}
}
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 568e48e..003b882 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -65,13 +65,20 @@
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
+// If optimization for a function was disabled due to a high deoptimization
+// count, but the function is hot and has been seen on the stack this number
+// of times, then we try to reenable optimization for it.
+static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
+STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt = 500;
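The reenabling heuristic these constants drive is implemented in the hunk below. A condensed sketch of just the tick-counting decision, with the surrounding profiler state stubbed out (thresholds from this patch):

    static const int kProfilerTicksBeforeReenablingOptimization = 250;

    // Returns true when a deopt-disabled but hot function deserves another
    // optimization attempt; otherwise bumps its one-byte tick counter.
    bool TickDisabledFunction(int* profiler_ticks) {
      if (*profiler_ticks >= kProfilerTicksBeforeReenablingOptimization) {
        *profiler_ticks = 0;  // Reset before retrying optimization.
        return true;
      }
      (*profiler_ticks)++;
      return false;
    }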
@@ -263,7 +270,9 @@
}
}
- Code* shared_code = function->shared()->code();
+ SharedFunctionInfo* shared = function->shared();
+ Code* shared_code = shared->code();
+
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsMarkedForLazyRecompilation()) {
@@ -273,20 +282,34 @@
shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
- // Do not record non-optimizable functions.
- if (!function->IsOptimizable()) continue;
- if (function->shared()->optimization_disabled()) continue;
-
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
// will be executed only once.
const int kMaxToplevelSourceSize = 10 * 1024;
- if (function->shared()->is_toplevel()
- && (frame_count > 1
- || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
+ if (shared->is_toplevel() &&
+ (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
continue;
}
+ // Do not record non-optimizable functions.
+ if (shared->optimization_disabled()) {
+ if (shared->deopt_count() >= Compiler::kDefaultMaxOptCount) {
+ // If optimization was disabled due to many deoptimizations,
+ // then check if the function is hot and try to reenable optimization.
+ int ticks = shared_code->profiler_ticks();
+ if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+ shared_code->set_profiler_ticks(0);
+ shared->TryReenableOptimization();
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ }
+ }
+ continue;
+ }
+ if (!function->IsOptimizable()) continue;
+
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
diff --git a/src/runtime.cc b/src/runtime.cc
index 89f5025..9e38949 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1236,7 +1236,8 @@
if (needs_access_checks) {
// Copy the map so it won't interfere with the constructor's initial map.
Object* new_map;
- { MaybeObject* maybe_new_map = old_map->CopyDropTransitions();
+ { MaybeObject* maybe_new_map =
+ old_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
}
@@ -1254,7 +1255,8 @@
if (!old_map->is_access_check_needed()) {
// Copy the map so it won't interfere with the constructor's initial map.
Object* new_map;
- { MaybeObject* maybe_new_map = old_map->CopyDropTransitions();
+ { MaybeObject* maybe_new_map =
+ old_map->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED);
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
}
@@ -2175,60 +2177,58 @@
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
Handle<Object> code = args.at<Object>(1);
- Handle<Context> context(target->context());
+ if (code->IsNull()) return *target;
+ RUNTIME_ASSERT(code->IsJSFunction());
+ Handle<JSFunction> source = Handle<JSFunction>::cast(code);
+ Handle<SharedFunctionInfo> target_shared(target->shared());
+ Handle<SharedFunctionInfo> source_shared(source->shared());
- if (!code->IsNull()) {
- RUNTIME_ASSERT(code->IsJSFunction());
- Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
- Handle<SharedFunctionInfo> shared(fun->shared());
-
- if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- // Since we don't store the source for this we should never
- // optimize this.
- shared->code()->set_optimizable(false);
- // Set the code, scope info, formal parameter count,
- // and the length of the target function.
- target->shared()->set_code(shared->code());
- target->ReplaceCode(shared->code());
- target->shared()->set_scope_info(shared->scope_info());
- target->shared()->set_length(shared->length());
- target->shared()->set_formal_parameter_count(
- shared->formal_parameter_count());
- // Set the source code of the target function to undefined.
- // SetCode is only used for built-in constructors like String,
- // Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
- target->shared()->set_script(isolate->heap()->undefined_value());
- target->shared()->code()->set_optimizable(false);
- // Clear the optimization hints related to the compiled code as these are no
- // longer valid when the code is overwritten.
- target->shared()->ClearThisPropertyAssignmentsInfo();
- context = Handle<Context>(fun->context());
-
- // Make sure we get a fresh copy of the literal vector to avoid
- // cross context contamination.
- int number_of_literals = fun->NumberOfLiterals();
- Handle<FixedArray> literals =
- isolate->factory()->NewFixedArray(number_of_literals, TENURED);
- if (number_of_literals > 0) {
- // Insert the object, regexp and array functions in the literals
- // array prefix. These are the functions that will be used when
- // creating object, regexp and array literals.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
- }
- target->set_literals(*literals);
- target->set_next_function_link(isolate->heap()->undefined_value());
-
- if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
- isolate->logger()->LogExistingFunction(
- shared, Handle<Code>(shared->code()));
- }
+ if (!source->is_compiled() &&
+ !JSFunction::CompileLazy(source, KEEP_EXCEPTION)) {
+ return Failure::Exception();
}
+ // Set the code, scope info, formal parameter count, and the length
+ // of the target shared function info. Set the source code of the
+ // target function to undefined. SetCode is only used for built-in
+ // constructors like String, Array, and Object, and some web code
+ // doesn't like seeing source code for constructors.
+ target_shared->set_code(source_shared->code());
+ target_shared->set_scope_info(source_shared->scope_info());
+ target_shared->set_length(source_shared->length());
+ target_shared->set_formal_parameter_count(
+ source_shared->formal_parameter_count());
+ target_shared->set_script(isolate->heap()->undefined_value());
+
+ // Since we don't store the source we should never optimize this.
+ target_shared->code()->set_optimizable(false);
+
+ // Clear the optimization hints related to the compiled code as these
+ // are no longer valid when the code is overwritten.
+ target_shared->ClearThisPropertyAssignmentsInfo();
+
+ // Set the code of the target function.
+ target->ReplaceCode(source_shared->code());
+
+ // Make sure we get a fresh copy of the literal vector to avoid cross
+ // context contamination.
+ Handle<Context> context(source->context());
+ int number_of_literals = source->NumberOfLiterals();
+ Handle<FixedArray> literals =
+ isolate->factory()->NewFixedArray(number_of_literals, TENURED);
+ if (number_of_literals > 0) {
+ literals->set(JSFunction::kLiteralGlobalContextIndex,
+ context->global_context());
+ }
target->set_context(*context);
+ target->set_literals(*literals);
+ target->set_next_function_link(isolate->heap()->undefined_value());
+
+ if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
+ isolate->logger()->LogExistingFunction(
+ source_shared, Handle<Code>(source_shared->code()));
+ }
+
return *target;
}
@@ -2528,8 +2528,10 @@
class CompiledReplacement {
public:
- CompiledReplacement()
- : parts_(1), replacement_substrings_(0), simple_hint_(false) {}
+ explicit CompiledReplacement(Zone* zone)
+ : parts_(1, zone), replacement_substrings_(0, zone),
+ simple_hint_(false),
+ zone_(zone) {}
void Compile(Handle<String> replacement,
int capture_count,
@@ -2549,6 +2551,8 @@
return simple_hint_;
}
+ Zone* zone() const { return zone_; }
+
private:
enum PartType {
SUBJECT_PREFIX = 1,
@@ -2610,7 +2614,8 @@
static bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
Vector<Char> characters,
int capture_count,
- int subject_length) {
+ int subject_length,
+ Zone* zone) {
int length = characters.length();
int last = 0;
for (int i = 0; i < length; i++) {
@@ -2625,7 +2630,8 @@
case '$':
if (i > last) {
// There is a substring before. Include the first "$".
- parts->Add(ReplacementPart::ReplacementSubString(last, next_index));
+ parts->Add(ReplacementPart::ReplacementSubString(last, next_index),
+ zone);
last = next_index + 1; // Continue after the second "$".
} else {
// Let the next substring start with the second "$".
@@ -2635,25 +2641,25 @@
break;
case '`':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectPrefix());
+ parts->Add(ReplacementPart::SubjectPrefix(), zone);
i = next_index;
last = i + 1;
break;
case '\'':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectSuffix(subject_length));
+ parts->Add(ReplacementPart::SubjectSuffix(subject_length), zone);
i = next_index;
last = i + 1;
break;
case '&':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectMatch());
+ parts->Add(ReplacementPart::SubjectMatch(), zone);
i = next_index;
last = i + 1;
break;
@@ -2686,10 +2692,10 @@
}
if (capture_ref > 0) {
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
ASSERT(capture_ref <= capture_count);
- parts->Add(ReplacementPart::SubjectCapture(capture_ref));
+ parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
last = next_index + 1;
}
i = next_index;
@@ -2703,10 +2709,10 @@
}
if (length > last) {
if (last == 0) {
- parts->Add(ReplacementPart::ReplacementString());
+ parts->Add(ReplacementPart::ReplacementString(), zone);
return true;
} else {
- parts->Add(ReplacementPart::ReplacementSubString(last, length));
+ parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
}
}
return false;
@@ -2715,6 +2721,7 @@
ZoneList<ReplacementPart> parts_;
ZoneList<Handle<String> > replacement_substrings_;
bool simple_hint_;
+ Zone* zone_;
};
@@ -2729,13 +2736,15 @@
simple_hint_ = ParseReplacementPattern(&parts_,
content.ToAsciiVector(),
capture_count,
- subject_length);
+ subject_length,
+ zone());
} else {
ASSERT(content.IsTwoByte());
simple_hint_ = ParseReplacementPattern(&parts_,
content.ToUC16Vector(),
capture_count,
- subject_length);
+ subject_length,
+ zone());
}
}
Isolate* isolate = replacement->GetIsolate();
@@ -2747,12 +2756,12 @@
int from = -tag;
int to = parts_[i].data;
replacement_substrings_.Add(
- isolate->factory()->NewSubString(replacement, from, to));
+ isolate->factory()->NewSubString(replacement, from, to), zone());
parts_[i].tag = REPLACEMENT_SUBSTRING;
parts_[i].data = substring_index;
substring_index++;
} else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.Add(replacement);
+ replacement_substrings_.Add(replacement, zone());
parts_[i].data = substring_index;
substring_index++;
}
@@ -2801,7 +2810,8 @@
void FindAsciiStringIndices(Vector<const char> subject,
char pattern,
ZoneList<int>* indices,
- unsigned int limit) {
+ unsigned int limit,
+ Zone* zone) {
ASSERT(limit > 0);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
@@ -2812,7 +2822,7 @@
pos = reinterpret_cast<const char*>(
memchr(pos, pattern, subject_end - pos));
if (pos == NULL) return;
- indices->Add(static_cast<int>(pos - subject_start));
+ indices->Add(static_cast<int>(pos - subject_start), zone);
pos++;
limit--;
}
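The loop above is the memchr fast path for single-character patterns. A standalone sketch of the same scan without the V8 types, with std::vector standing in for the zone-allocated ZoneList<int>:

    #include <cstring>
    #include <vector>

    // Collects offsets of pattern in subject, stopping after limit hits.
    std::vector<int> FindCharIndices(const char* subject, size_t length,
                                     char pattern, unsigned limit) {
      std::vector<int> indices;
      const char* start = subject;
      const char* end = subject + length;
      const char* pos = subject;
      while (limit > 0) {
        pos = static_cast<const char*>(memchr(pos, pattern, end - pos));
        if (pos == NULL) break;
        indices.push_back(static_cast<int>(pos - start));
        pos++;
        limit--;
      }
      return indices;
    }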
@@ -2824,7 +2834,8 @@
Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
ZoneList<int>* indices,
- unsigned int limit) {
+ unsigned int limit,
+ Zone* zone) {
ASSERT(limit > 0);
// Collect indices of pattern in subject.
// Stop after finding at most limit values.
@@ -2834,7 +2845,7 @@
while (limit > 0) {
index = search.Search(subject, index);
if (index < 0) return;
- indices->Add(index);
+ indices->Add(index, zone);
index += pattern_length;
limit--;
}
@@ -2845,7 +2856,8 @@
String* subject,
String* pattern,
ZoneList<int>* indices,
- unsigned int limit) {
+ unsigned int limit,
+ Zone* zone) {
{
AssertNoAllocation no_gc;
String::FlatContent subject_content = subject->GetFlatContent();
@@ -2860,20 +2872,23 @@
FindAsciiStringIndices(subject_vector,
pattern_vector[0],
indices,
- limit);
+ limit,
+ zone);
} else {
FindStringIndices(isolate,
subject_vector,
pattern_vector,
indices,
- limit);
+ limit,
+ zone);
}
} else {
FindStringIndices(isolate,
subject_vector,
pattern_content.ToUC16Vector(),
indices,
- limit);
+ limit,
+ zone);
}
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
@@ -2882,13 +2897,15 @@
subject_vector,
pattern_content.ToAsciiVector(),
indices,
- limit);
+ limit,
+ zone);
} else {
FindStringIndices(isolate,
subject_vector,
pattern_content.ToUC16Vector(),
indices,
- limit);
+ limit,
+ zone);
}
}
}
@@ -2967,12 +2984,13 @@
Handle<String> subject,
Handle<JSRegExp> pattern_regexp,
Handle<String> replacement,
- Handle<JSArray> last_match_info) {
+ Handle<JSArray> last_match_info,
+ Zone* zone) {
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
ZoneScope zone_space(isolate, DELETE_ON_EXIT);
- ZoneList<int> indices(8);
+ ZoneList<int> indices(8, isolate->zone());
ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -2980,7 +2998,8 @@
int pattern_len = pattern->length();
int replacement_len = replacement->length();
- FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff);
+ FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff,
+ zone);
int matches = indices.length();
if (matches == 0) return *subject;
@@ -3049,7 +3068,8 @@
String* subject,
JSRegExp* regexp,
String* replacement,
- JSArray* last_match_info) {
+ JSArray* last_match_info,
+ Zone* zone) {
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
@@ -3075,8 +3095,8 @@
int capture_count = regexp_handle->CaptureCount();
// CompiledReplacement uses zone allocation.
- ZoneScope zone(isolate, DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement;
+ ZoneScope zonescope(isolate, DELETE_ON_EXIT);
+ CompiledReplacement compiled_replacement(isolate->zone());
compiled_replacement.Compile(replacement_handle,
capture_count,
length);
@@ -3094,14 +3114,16 @@
subject_handle,
regexp_handle,
replacement_handle,
- last_match_info_handle);
+ last_match_info_handle,
+ zone);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
isolate,
subject_handle,
regexp_handle,
replacement_handle,
- last_match_info_handle);
+ last_match_info_handle,
+ zone);
}
}
@@ -3184,7 +3206,8 @@
Isolate* isolate,
String* subject,
JSRegExp* regexp,
- JSArray* last_match_info) {
+ JSArray* last_match_info,
+ Zone* zone) {
ASSERT(subject->IsFlat());
HandleScope handles(isolate);
@@ -3203,14 +3226,16 @@
subject_handle,
regexp_handle,
empty_string_handle,
- last_match_info_handle);
+ last_match_info_handle,
+ zone);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
isolate,
subject_handle,
regexp_handle,
empty_string_handle,
- last_match_info_handle);
+ last_match_info_handle,
+ zone);
}
}
@@ -3369,13 +3394,14 @@
ASSERT(last_match_info->HasFastObjectElements());
+ Zone* zone = isolate->zone();
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
- isolate, subject, regexp, last_match_info);
+ isolate, subject, regexp, last_match_info, zone);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
- isolate, subject, regexp, last_match_info);
+ isolate, subject, regexp, last_match_info, zone);
}
}
@@ -3383,7 +3409,8 @@
subject,
regexp,
replacement,
- last_match_info);
+ last_match_info,
+ zone);
}
@@ -3717,8 +3744,9 @@
}
int length = subject->length();
+ Zone* zone = isolate->zone();
ZoneScope zone_space(isolate, DELETE_ON_EXIT);
- ZoneList<int> offsets(8);
+ ZoneList<int> offsets(8, zone);
int start;
int end;
do {
@@ -3728,8 +3756,8 @@
start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
}
- offsets.Add(start);
- offsets.Add(end);
+ offsets.Add(start, zone);
+ offsets.Add(end, zone);
if (start == end) if (++end > length) break;
match = RegExpImpl::Exec(regexp, subject, end, regexp_info,
isolate->zone());
@@ -6435,17 +6463,18 @@
static const int kMaxInitialListCapacity = 16;
+ Zone* zone = isolate->zone();
ZoneScope scope(isolate, DELETE_ON_EXIT);
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity);
+ ZoneList<int> indices(initial_capacity, zone);
if (!pattern->IsFlat()) FlattenString(pattern);
- FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit);
+ FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone);
if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length);
+ indices.Add(subject_length, zone);
}
// The list indices now contains the end of each part to create.
@@ -9284,13 +9313,14 @@
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Zone* zone = isolate->zone();
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
if (source->IsSeqAsciiString()) {
- result = JsonParser<true>::Parse(source);
+ result = JsonParser<true>::Parse(source, zone);
} else {
- result = JsonParser<false>::Parse(source);
+ result = JsonParser<false>::Parse(source, zone);
}
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
@@ -10259,7 +10289,6 @@
}
case INTERCEPTOR:
case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
return heap->undefined_value();
@@ -12746,7 +12775,8 @@
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
+ return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
+ isolate->zone());
}
// Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -13473,6 +13503,8 @@
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
+// Properties test sitting with elements tests - not fooling anyone.
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
diff --git a/src/runtime.h b/src/runtime.h
index fc0a472..f5a4f50 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -380,6 +380,7 @@
F(HasExternalUnsignedIntElements, 1, 1) \
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
+ F(HasFastProperties, 1, 1) \
F(TransitionElementsSmiToDouble, 1, 1) \
F(TransitionElementsDoubleToObject, 1, 1) \
F(HaveSameMap, 2, 1) \
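
For context, F(HasFastProperties, 1, 1) declares a runtime function taking one argument and producing one return value, and the ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION line in runtime.cc instantiates its body. The expansion is roughly the following (a hypothetical reconstruction for illustration, not text from this patch):

  RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFastProperties) {
    ASSERT(args.length() == 1);
    CONVERT_ARG_CHECKED(JSObject, obj, 0);
    return isolate->heap()->ToBoolean(obj->HasFastProperties());
  }
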
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index 89ad8af..714e5c3 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -116,8 +116,8 @@
}
-void Safepoint::DefinePointerRegister(Register reg) {
- registers_->Add(reg.code());
+void Safepoint::DefinePointerRegister(Register reg, Zone* zone) {
+ registers_->Add(reg.code(), zone);
}
@@ -131,15 +131,16 @@
info.pc = assembler->pc_offset();
info.arguments = arguments;
info.has_doubles = (kind & Safepoint::kWithDoubles);
- deoptimization_info_.Add(info);
- deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex);
+ deoptimization_info_.Add(info, zone_);
+ deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
if (deopt_mode == Safepoint::kNoLazyDeopt) {
last_lazy_safepoint_ = deopt_index_list_.length();
}
- indexes_.Add(new ZoneList<int>(8));
+ indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
registers_.Add((kind & Safepoint::kWithRegisters)
- ? new ZoneList<int>(4)
- : NULL);
+ ? new(zone_) ZoneList<int>(4, zone_)
+ : NULL,
+ zone_);
return Safepoint(indexes_.last(), registers_.last());
}
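
Note the doubled zone in new(zone_) ZoneList<int>(8, zone_): the first use placement-allocates the list header in the arena, the second seeds the allocator for its backing store. A reduced sketch of the idiom:

  // Both the ZoneList object and the elements it holds live in 'zone'.
  ZoneList<int>* indexes = new(zone) ZoneList<int>(8, zone);
  indexes->Add(0, zone);
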
@@ -190,12 +191,12 @@
}
// Emit table of bitmaps.
- ZoneList<uint8_t> bits(bytes_per_entry);
+ ZoneList<uint8_t> bits(bytes_per_entry, zone_);
for (int i = 0; i < length; i++) {
ZoneList<int>* indexes = indexes_[i];
ZoneList<int>* registers = registers_[i];
bits.Clear();
- bits.AddBlock(0, bytes_per_entry);
+ bits.AddBlock(0, bytes_per_entry, zone_);
// Run through the registers (if any).
ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index 2ce4320..307d948 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -184,7 +184,7 @@
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
void DefinePointerSlot(int index, Zone* zone) { indexes_->Add(index, zone); }
- void DefinePointerRegister(Register reg);
+ void DefinePointerRegister(Register reg, Zone* zone);
private:
Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index f50af30..25f02f6 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -38,10 +38,10 @@
namespace internal {
-Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
+Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
// Collect stack and context locals.
- ZoneList<Variable*> stack_locals(scope->StackLocalCount());
- ZoneList<Variable*> context_locals(scope->ContextLocalCount());
+ ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
const int stack_local_count = stack_locals.length();
const int context_local_count = context_locals.length();
diff --git a/src/scopes.cc b/src/scopes.cc
index 2c61a75..ad6692e 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -57,7 +57,9 @@
}
-VariableMap::VariableMap() : ZoneHashMap(Match, 8) {}
+VariableMap::VariableMap(Zone* zone)
+ : ZoneHashMap(Match, 8, ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
VariableMap::~VariableMap() {}
@@ -69,24 +71,26 @@
Variable::Kind kind,
InitializationFlag initialization_flag,
Interface* interface) {
- Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true);
+ Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true,
+ ZoneAllocationPolicy(zone()));
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
- p->value = new Variable(scope,
- name,
- mode,
- is_valid_lhs,
- kind,
- initialization_flag,
- interface);
+ p->value = new(zone()) Variable(scope,
+ name,
+ mode,
+ is_valid_lhs,
+ kind,
+ initialization_flag,
+ interface);
}
return reinterpret_cast<Variable*>(p->value);
}
Variable* VariableMap::Lookup(Handle<String> name) {
- Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false);
+ Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false,
+ ZoneAllocationPolicy(NULL));
if (p != NULL) {
ASSERT(*reinterpret_cast<String**>(p->key) == *name);
ASSERT(p->value != NULL);
@@ -99,18 +103,19 @@
// ----------------------------------------------------------------------------
// Implementation of Scope
-Scope::Scope(Scope* outer_scope, ScopeType type)
+Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
: isolate_(Isolate::Current()),
- inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4),
+ inner_scopes_(4, zone),
+ variables_(zone),
+ temps_(4, zone),
+ params_(4, zone),
+ unresolved_(16, zone),
+ decls_(4, zone),
interface_(FLAG_harmony_modules &&
(type == MODULE_SCOPE || type == GLOBAL_SCOPE)
- ? Interface::NewModule() : NULL),
- already_resolved_(false) {
+ ? Interface::NewModule(zone) : NULL),
+ already_resolved_(false),
+ zone_(zone) {
SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
@@ -122,16 +127,18 @@
Scope::Scope(Scope* inner_scope,
ScopeType type,
- Handle<ScopeInfo> scope_info)
+ Handle<ScopeInfo> scope_info,
+ Zone* zone)
: isolate_(Isolate::Current()),
- inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4),
+ inner_scopes_(4, zone),
+ variables_(zone),
+ temps_(4, zone),
+ params_(4, zone),
+ unresolved_(16, zone),
+ decls_(4, zone),
interface_(NULL),
- already_resolved_(true) {
+ already_resolved_(true),
+ zone_(zone) {
SetDefaults(type, NULL, scope_info);
if (!scope_info.is_null()) {
num_heap_slots_ = scope_info_->ContextLength();
@@ -143,16 +150,17 @@
}
-Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
+Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
: isolate_(Isolate::Current()),
- inner_scopes_(1),
- variables_(),
- temps_(0),
- params_(0),
- unresolved_(0),
- decls_(0),
+ inner_scopes_(1, zone),
+ variables_(zone),
+ temps_(0, zone),
+ params_(0, zone),
+ unresolved_(0, zone),
+ decls_(0, zone),
interface_(NULL),
- already_resolved_(true) {
+ already_resolved_(true),
+ zone_(zone) {
SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
AddInnerScope(inner_scope);
++num_var_or_const_;
@@ -200,16 +208,18 @@
}
-Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope) {
+Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
+ Zone* zone) {
// Reconstruct the outer scope chain from a closure's context chain.
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
bool contains_with = false;
while (!context->IsGlobalContext()) {
if (context->IsWithContext()) {
- Scope* with_scope = new Scope(current_scope,
- WITH_SCOPE,
- Handle<ScopeInfo>::null());
+ Scope* with_scope = new(zone) Scope(current_scope,
+ WITH_SCOPE,
+ Handle<ScopeInfo>::null(),
+ zone);
current_scope = with_scope;
// All the inner scopes are inside a with.
contains_with = true;
@@ -218,18 +228,21 @@
}
} else if (context->IsFunctionContext()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- current_scope = new Scope(current_scope,
- FUNCTION_SCOPE,
- Handle<ScopeInfo>(scope_info));
+ current_scope = new(zone) Scope(current_scope,
+ FUNCTION_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
} else if (context->IsBlockContext()) {
ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
- current_scope = new Scope(current_scope,
- BLOCK_SCOPE,
- Handle<ScopeInfo>(scope_info));
+ current_scope = new(zone) Scope(current_scope,
+ BLOCK_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
} else {
ASSERT(context->IsCatchContext());
String* name = String::cast(context->extension());
- current_scope = new Scope(current_scope, Handle<String>(name));
+ current_scope = new(zone) Scope(
+ current_scope, Handle<String>(name), zone);
}
if (contains_with) current_scope->RecordWithStatement();
if (innermost_scope == NULL) innermost_scope = current_scope;
@@ -305,7 +318,7 @@
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
- outer_scope_->inner_scopes_.Add(this);
+ outer_scope_->inner_scopes_.Add(this, zone());
scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
} else {
scope_inside_with_ = is_with_scope();
@@ -370,7 +383,7 @@
// Move unresolved variables
for (int i = 0; i < unresolved_.length(); i++) {
- outer_scope()->unresolved_.Add(unresolved_[i]);
+ outer_scope()->unresolved_.Add(unresolved_[i], zone());
}
return NULL;
@@ -401,13 +414,8 @@
init_flag = kCreatedInitialized;
}
- Variable* var =
- variables_.Declare(this,
- name,
- mode,
- true,
- Variable::NORMAL,
- init_flag);
+ Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
+ init_flag);
var->AllocateTo(location, index);
return var;
}
@@ -422,7 +430,7 @@
VariableMode mode;
int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
if (index < 0) return NULL;
- Variable* var = new Variable(
+ Variable* var = new(zone()) Variable(
this, name, mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized);
VariableProxy* proxy = factory->NewVariableProxy(var);
@@ -451,9 +459,9 @@
void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
ASSERT(!already_resolved());
ASSERT(is_function_scope());
- Variable* var = variables_.Declare(
- this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
- params_.Add(var);
+ Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
+ kCreatedInitialized);
+ params_.Add(var, zone());
}
@@ -500,19 +508,19 @@
Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
- Variable* var = new Variable(this,
- name,
- TEMPORARY,
- true,
- Variable::NORMAL,
- kCreatedInitialized);
- temps_.Add(var);
+ Variable* var = new(zone()) Variable(this,
+ name,
+ TEMPORARY,
+ true,
+ Variable::NORMAL,
+ kCreatedInitialized);
+ temps_.Add(var, zone());
return var;
}
void Scope::AddDeclaration(Declaration* declaration) {
- decls_.Add(declaration);
+ decls_.Add(declaration, zone());
}
@@ -588,7 +596,7 @@
Variable* var = temps_[i];
if (var->is_used()) {
ASSERT(var->IsStackLocal());
- stack_locals->Add(var);
+ stack_locals->Add(var, zone());
}
}
@@ -599,9 +607,9 @@
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var->is_used()) {
if (var->IsStackLocal()) {
- stack_locals->Add(var);
+ stack_locals->Add(var, zone());
} else if (var->IsContextSlot()) {
- context_locals->Add(var);
+ context_locals->Add(var, zone());
}
}
}
@@ -699,7 +707,7 @@
Handle<ScopeInfo> Scope::GetScopeInfo() {
if (scope_info_.is_null()) {
- scope_info_ = ScopeInfo::Create(this);
+ scope_info_ = ScopeInfo::Create(this, zone());
}
return scope_info_;
}
@@ -885,7 +893,7 @@
Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
- if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
+ if (dynamics_ == NULL) dynamics_ = new(zone()) DynamicScopePart(zone());
VariableMap* map = dynamics_->GetMap(mode);
Variable* var = map->Lookup(name);
if (var == NULL) {
@@ -1019,7 +1027,7 @@
if (FLAG_print_interface_details)
PrintF("# Resolve %s:\n", var->name()->ToAsciiArray());
#endif
- proxy->interface()->Unify(var->interface(), &ok);
+ proxy->interface()->Unify(var->interface(), zone(), &ok);
if (!ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
diff --git a/src/scopes.h b/src/scopes.h
index be6705b..decd74d 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -40,7 +40,7 @@
// A hash map to support fast variable declaration and lookup.
class VariableMap: public ZoneHashMap {
public:
- VariableMap();
+ explicit VariableMap(Zone* zone);
virtual ~VariableMap();
@@ -53,6 +53,11 @@
Interface* interface = Interface::NewValue());
Variable* Lookup(Handle<String> name);
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Zone* zone_;
};
@@ -62,14 +67,19 @@
// and setup time for scopes that don't need them.
class DynamicScopePart : public ZoneObject {
public:
+ explicit DynamicScopePart(Zone* zone) {
+ for (int i = 0; i < 3; i++)
+ maps_[i] = new(zone->New(sizeof(VariableMap))) VariableMap(zone);
+ }
+
VariableMap* GetMap(VariableMode mode) {
int index = mode - DYNAMIC;
ASSERT(index >= 0 && index < 3);
- return &maps_[index];
+ return maps_[index];
}
private:
- VariableMap maps_[3];
+ VariableMap *maps_[3];
};
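
The allocation line above packs two steps into one expression; spelled out (a sketch, assuming Zone::New returns suitably aligned raw memory, as its other uses in this patch imply):

  void* memory = zone->New(sizeof(VariableMap));     // raw bytes from the arena
  VariableMap* map = new(memory) VariableMap(zone);  // construct in place
  // The map dies with the zone; no matching delete is ever issued.
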
@@ -87,14 +97,15 @@
// ---------------------------------------------------------------------------
// Construction
- Scope(Scope* outer_scope, ScopeType type);
+ Scope(Scope* outer_scope, ScopeType type, Zone* zone);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
static bool Analyze(CompilationInfo* info);
- static Scope* DeserializeScopeChain(Context* context, Scope* global_scope);
+ static Scope* DeserializeScopeChain(Context* context, Scope* global_scope,
+ Zone* zone);
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
@@ -106,6 +117,8 @@
// tree and its children are reparented.
Scope* FinalizeBlockScope();
+ Zone* zone() const { return zone_; }
+
// ---------------------------------------------------------------------------
// Declarations
@@ -161,7 +174,7 @@
ASSERT(!already_resolved());
VariableProxy* proxy =
factory->NewVariableProxy(name, false, position, interface);
- unresolved_.Add(proxy);
+ unresolved_.Add(proxy, zone_);
return proxy;
}
@@ -581,14 +594,15 @@
private:
// Construct a scope based on the scope info.
- Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
+ Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
+ Zone* zone);
// Construct a catch scope with a binding for the name.
- Scope(Scope* inner_scope, Handle<String> catch_variable_name);
+ Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone);
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
- inner_scopes_.Add(inner_scope);
+ inner_scopes_.Add(inner_scope, zone_);
inner_scope->outer_scope_ = this;
}
}
@@ -596,6 +610,8 @@
void SetDefaults(ScopeType type,
Scope* outer_scope,
Handle<ScopeInfo> scope_info);
+
+ Zone* zone_;
};
} } // namespace v8::internal
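
With these signatures a scope mentions its zone twice: once to placement-allocate the Scope object itself and once for the member lists it initializes. A minimal construction sketch, assuming the zone comes from the active isolate:

  Zone* zone = isolate->zone();
  Scope* global = new(zone) Scope(NULL, GLOBAL_SCOPE, zone);
  Scope* function = new(zone) Scope(global, FUNCTION_SCOPE, zone);
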
diff --git a/src/small-pointer-list.h b/src/small-pointer-list.h
index 75fea06..295a06f 100644
--- a/src/small-pointer-list.h
+++ b/src/small-pointer-list.h
@@ -44,22 +44,22 @@
public:
SmallPointerList() : data_(kEmptyTag) {}
- explicit SmallPointerList(int capacity) : data_(kEmptyTag) {
- Reserve(capacity);
+ SmallPointerList(int capacity, Zone* zone) : data_(kEmptyTag) {
+ Reserve(capacity, zone);
}
- void Reserve(int capacity) {
+ void Reserve(int capacity, Zone* zone) {
if (capacity < 2) return;
if ((data_ & kTagMask) == kListTag) {
if (list()->capacity() >= capacity) return;
int old_length = list()->length();
- list()->AddBlock(NULL, capacity - list()->capacity());
+ list()->AddBlock(NULL, capacity - list()->capacity(), zone);
list()->Rewind(old_length);
return;
}
- PointerList* list = new PointerList(capacity);
+ PointerList* list = new(zone) PointerList(capacity, zone);
if ((data_ & kTagMask) == kSingletonTag) {
- list->Add(single_value());
+ list->Add(single_value(), zone);
}
ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
data_ = reinterpret_cast<intptr_t>(list) | kListTag;
@@ -83,21 +83,21 @@
return list()->length();
}
- void Add(T* pointer) {
+ void Add(T* pointer, Zone* zone) {
ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
if ((data_ & kTagMask) == kEmptyTag) {
data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
return;
}
if ((data_ & kTagMask) == kSingletonTag) {
- PointerList* list = new PointerList(2);
- list->Add(single_value());
- list->Add(pointer);
+ PointerList* list = new(zone) PointerList(2, zone);
+ list->Add(single_value(), zone);
+ list->Add(pointer, zone);
ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
data_ = reinterpret_cast<intptr_t>(list) | kListTag;
return;
}
- list()->Add(pointer);
+ list()->Add(pointer, zone);
}
// Note: returns T* and not T*& (unlike List from list.h).
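
SmallPointerList only touches the zone once it actually holds two or more entries, which is why just the growing operations gained a Zone parameter. A usage sketch (the element pointers are illustrative):

  SmallPointerList<LOperand> list;  // empty: a tagged word, no allocation
  list.Add(first, zone);            // singleton: still no zone allocation
  list.Add(second, zone);           // promotes to a zone-allocated PointerList
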
diff --git a/src/splay-tree-inl.h b/src/splay-tree-inl.h
index bb6ded7..4eca71d 100644
--- a/src/splay-tree-inl.h
+++ b/src/splay-tree-inl.h
@@ -43,11 +43,10 @@
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Insert(const Key& key,
- Locator* locator,
- Allocator allocator) {
+ Locator* locator) {
if (is_empty()) {
// If the tree is empty, insert the new node.
- root_ = new(allocator) Node(key, Config::NoValue());
+ root_ = new(allocator_) Node(key, Config::NoValue());
} else {
// Splay on the key to move the last node on the search path
// for the key to the root of the tree.
@@ -59,7 +58,7 @@
return false;
}
// Insert the new node.
- Node* node = new(allocator) Node(key, Config::NoValue());
+ Node* node = new(allocator_) Node(key, Config::NoValue());
InsertInternal(cmp, node);
}
locator->bind(root_);
@@ -293,16 +292,15 @@
template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEachNode(Callback* callback,
- Allocator allocator) {
+void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
// Pre-allocate some space for tiny trees.
- List<Node*, Allocator> nodes_to_visit(10);
- if (root_ != NULL) nodes_to_visit.Add(root_, allocator);
+ List<Node*, Allocator> nodes_to_visit(10, allocator_);
+ if (root_ != NULL) nodes_to_visit.Add(root_, allocator_);
int pos = 0;
while (pos < nodes_to_visit.length()) {
Node* node = nodes_to_visit[pos++];
- if (node->left() != NULL) nodes_to_visit.Add(node->left(), allocator);
- if (node->right() != NULL) nodes_to_visit.Add(node->right(), allocator);
+ if (node->left() != NULL) nodes_to_visit.Add(node->left(), allocator_);
+ if (node->right() != NULL) nodes_to_visit.Add(node->right(), allocator_);
callback->Call(node);
}
}
diff --git a/src/splay-tree.h b/src/splay-tree.h
index e21e216..388f9b5 100644
--- a/src/splay-tree.h
+++ b/src/splay-tree.h
@@ -58,7 +58,8 @@
class Locator;
- SplayTree() : root_(NULL) { }
+ SplayTree(AllocationPolicy allocator = AllocationPolicy())
+ : root_(NULL), allocator_(allocator) { }
~SplayTree();
INLINE(void* operator new(size_t size,
@@ -72,8 +73,7 @@
// Inserts the given key in this tree with the given value. Returns
// true if a node was inserted, otherwise false. If found the locator
// is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator,
- AllocationPolicy allocator = AllocationPolicy());
+ bool Insert(const Key& key, Locator* locator);
// Looks up the key in this tree and returns true if it was found,
// otherwise false. If the node is found the locator is enabled and
@@ -195,10 +195,10 @@
};
template <class Callback>
- void ForEachNode(Callback* callback,
- AllocationPolicy allocator = AllocationPolicy());
+ void ForEachNode(Callback* callback);
Node* root_;
+ AllocationPolicy allocator_;
DISALLOW_COPY_AND_ASSIGN(SplayTree);
};
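
Capturing the allocator at construction removes it from every call site; Insert and ForEachNode now draw on the stored policy. A sketch, with Config standing in for a concrete key/value configuration:

  ZoneAllocationPolicy policy(zone);
  SplayTree<Config, ZoneAllocationPolicy> tree(policy);
  SplayTree<Config, ZoneAllocationPolicy>::Locator locator;
  if (tree.Insert(key, &locator)) {
    locator.set_value(value);  // bind a value to the freshly inserted node
  }
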
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index bd7163a..2794891 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -43,7 +43,8 @@
// StubCache implementation.
-StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
+StubCache::StubCache(Isolate* isolate, Zone* zone)
+ : isolate_(isolate), zone_(zone) {
ASSERT(isolate == Isolate::Current());
}
@@ -171,6 +172,25 @@
}
+Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadViaGetter(name, receiver, holder, getter);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+
Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
@@ -384,7 +404,7 @@
Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
- Handle<JSObject> receiver,
+ Handle<Map> receiver_map,
KeyedIC::StubKind stub_kind,
StrictModeFlag strict_mode) {
KeyedAccessGrowMode grow_mode =
@@ -412,7 +432,6 @@
UNREACHABLE();
break;
}
- Handle<Map> receiver_map(receiver->map());
Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -447,7 +466,7 @@
} else {
PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
}
- JSObject::UpdateMapCodeCache(receiver, name, code);
+ Map::UpdateCodeCache(receiver_map, name, code);
return code;
}
@@ -496,6 +515,24 @@
}
+Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ StrictModeFlag strict_mode) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, CALLBACKS, strict_mode);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code = compiler.CompileStoreViaSetter(receiver, setter, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+
Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode) {
@@ -901,7 +938,7 @@
int offset = PrimaryOffset(name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
- types->Add(Handle<Map>(map));
+ types->Add(Handle<Map>(map), zone());
}
}
}
@@ -925,7 +962,7 @@
int offset = SecondaryOffset(name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
- types->Add(Handle<Map>(map));
+ types->Add(Handle<Map>(map), zone());
}
}
}
@@ -944,6 +981,7 @@
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
+ ASSERT(callback->IsCompatibleReceiver(args[0]));
v8::AccessorInfo info(&args[0]);
HandleScope scope(isolate);
v8::Handle<v8::Value> result;
@@ -965,6 +1003,7 @@
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
ASSERT(fun != NULL);
+ ASSERT(callback->IsCompatibleReceiver(recv));
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
HandleScope scope(isolate);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 29bdb61..cd04143 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -90,6 +90,11 @@
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
+ Handle<Code> ComputeLoadViaGetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter);
+
Handle<Code> ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
@@ -157,6 +162,11 @@
Handle<AccessorInfo> callback,
StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreViaSetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ StrictModeFlag strict_mode);
+
Handle<Code> ComputeStoreInterceptor(Handle<String> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode);
@@ -169,7 +179,7 @@
Handle<Map> transition,
StrictModeFlag strict_mode);
- Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<JSObject> receiver,
+ Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<Map> receiver_map,
KeyedIC::StubKind stub_kind,
StrictModeFlag strict_mode);
@@ -300,9 +310,10 @@
Isolate* isolate() { return isolate_; }
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
+ Zone* zone() const { return zone_; }
private:
- explicit StubCache(Isolate* isolate);
+ StubCache(Isolate* isolate, Zone* zone);
Handle<Code> ComputeCallInitialize(int argc,
RelocInfo::Mode mode,
@@ -375,6 +386,7 @@
Entry primary_[kPrimaryTableSize];
Entry secondary_[kSecondaryTableSize];
Isolate* isolate_;
+ Zone* zone_;
friend class Isolate;
friend class SCTableReference;
@@ -460,14 +472,16 @@
Register scratch2,
Label* miss_label);
- static void GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label);
+ void GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ int index,
+ Handle<Map> transition,
+ Handle<String> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
static void GenerateLoadMiss(MacroAssembler* masm,
Code::Kind kind);
@@ -511,6 +525,7 @@
int save_at_depth,
Label* miss);
+
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
@@ -593,6 +608,11 @@
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
+ Handle<Code> CompileLoadViaGetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter);
+
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
@@ -678,6 +698,10 @@
Handle<AccessorInfo> callback,
Handle<String> name);
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ Handle<String> name);
+
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
Handle<String> name);
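
Both new entries are reached through the isolate's stub cache. A hedged sketch of the calling convention (local handle names are illustrative):

  StubCache* cache = isolate->stub_cache();
  Handle<Code> load_stub =
      cache->ComputeLoadViaGetter(name, receiver, holder, getter);
  Handle<Code> store_stub =
      cache->ComputeStoreViaSetter(name, receiver, setter, kNonStrictMode);
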
diff --git a/src/type-info.cc b/src/type-info.cc
index 159be6a..f5e9106 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -61,9 +61,11 @@
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Handle<Context> global_context,
- Isolate* isolate) {
+ Isolate* isolate,
+ Zone* zone) {
global_context_ = global_context;
isolate_ = isolate;
+ zone_ = zone;
BuildDictionary(code);
ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
}
@@ -501,10 +503,10 @@
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
} else if (object->IsMap()) {
- types->Add(Handle<Map>::cast(object));
+ types->Add(Handle<Map>::cast(object), zone());
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
- types->Reserve(4);
+ types->Reserve(4, zone());
ASSERT(object->IsCode());
isolate_->stub_cache()->CollectMatchingMaps(types,
*name,
@@ -548,11 +550,12 @@
}
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list) {
+static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
+ Zone* zone) {
for (int i = 0; i < list->length(); ++i) {
if (list->at(i).is_identical_to(map)) return;
}
- list->Add(map);
+ list->Add(map, zone);
}
@@ -571,7 +574,7 @@
if (object->IsMap()) {
Map* map = Map::cast(object);
if (!CanRetainOtherContext(map, *global_context_)) {
- AddMapIfMissing(Handle<Map>(map), types);
+ AddMapIfMissing(Handle<Map>(map), types, zone());
}
}
}
@@ -591,7 +594,7 @@
// infos before we process them.
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
AssertNoAllocation no_allocation;
- ZoneList<RelocInfo> infos(16);
+ ZoneList<RelocInfo> infos(16, zone());
HandleScope scope;
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
@@ -606,7 +609,7 @@
ZoneList<RelocInfo>* infos) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- infos->Add(*it.rinfo());
+ infos->Add(*it.rinfo(), zone());
}
}
diff --git a/src/type-info.h b/src/type-info.h
index d461331..74910cd 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -236,7 +236,8 @@
public:
TypeFeedbackOracle(Handle<Code> code,
Handle<Context> global_context,
- Isolate* isolate);
+ Isolate* isolate,
+ Zone* zone);
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
@@ -293,6 +294,8 @@
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
+ Zone* zone() const { return zone_; }
+
private:
void CollectReceiverTypes(unsigned ast_id,
Handle<String> name,
@@ -317,6 +320,7 @@
Handle<Context> global_context_;
Isolate* isolate_;
Handle<UnseededNumberDictionary> dictionary_;
+ Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
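
Construction sites accordingly gain a fourth argument; a minimal sketch, assuming the oracle shares the zone of the compilation that owns it (handle names illustrative):

  TypeFeedbackOracle oracle(unoptimized_code, global_context, isolate,
                            isolate->zone());
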
diff --git a/src/version.cc b/src/version.cc
index 4427a18..06912b1 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 11
-#define BUILD_NUMBER 9
+#define BUILD_NUMBER 10
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 0db7424..a3e42eb 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -779,10 +779,11 @@
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
+ : isolate()->factory()->undefined_value(),
+ zone());
break;
case Variable::PARAMETER:
@@ -837,12 +838,12 @@
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
+ globals_->Add(function, zone());
break;
}
@@ -894,8 +895,8 @@
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
@@ -1567,7 +1568,7 @@
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -4459,15 +4460,50 @@
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
__ push(rdx);
+
// Store result register while executing finally block.
__ push(result_register());
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Load(rdx, pending_message_obj);
+ __ push(rdx);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Load(rdx, has_pending_message);
+ __ push(rdx);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Load(rdx, pending_message_script);
+ __ push(rdx);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
+ // Restore pending message from stack.
+ __ pop(rdx);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Store(pending_message_script, rdx);
+
+ __ pop(rdx);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Store(has_pending_message, rdx);
+
+ __ pop(rdx);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Store(pending_message_obj, rdx);
+
+ // Restore result register from stack.
__ pop(result_register());
+
// Uncook return address.
__ pop(rdx);
__ SmiToInteger32(rdx, rdx);
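
The enter/exit pair mirrors itself exactly: the three pending-message words are pushed obj, flag, script and popped script, flag, obj, keeping the saves LIFO above the result register and the cooked return address. Stack layout while the finally body runs, top first:

  // rsp -> pending_message_script
  //        has_pending_message
  //        pending_message_obj
  //        saved result register
  //        cooked return address (stored as a Smi)
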
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index cbbc915..bc8f848 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -553,7 +553,7 @@
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry) {
- jump_table_.Add(JumpTableEntry(entry));
+ jump_table_.Add(JumpTableEntry(entry), zone());
}
__ j(cc, &jump_table_.last().label);
}
@@ -598,7 +598,7 @@
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -647,12 +647,12 @@
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi);
+ safepoint.DefinePointerRegister(rsi, zone());
}
}
@@ -664,7 +664,7 @@
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -1942,7 +1942,7 @@
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
@@ -2195,12 +2195,12 @@
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsFound() && lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2212,13 +2212,39 @@
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ HeapObject* current = HeapObject::cast((*type)->prototype());
+ Heap* heap = type->GetHeap();
+ while (current != heap->null_value()) {
+ Handle<HeapObject> link(current);
+ __ LoadHeapObject(result, link);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Handle<Map>(JSObject::cast(current)->map()));
+ DeoptimizeIf(not_equal, env);
+ current = HeapObject::cast(current->map()->prototype());
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
+// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
+// prototype chain, which causes unbounded code generation.
+static bool CompactEmit(
+ SmallMapList* list, Handle<String> name, int i, Isolate* isolate) {
+ LookupResult lookup(isolate);
+ Handle<Map> map = list->at(i);
+ map->LookupInDescriptors(NULL, *name, &lookup);
+ return lookup.IsFound() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION);
+}
+
+
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
@@ -2232,18 +2258,32 @@
}
Handle<String> name = instr->hydrogen()->name();
Label done;
+ bool all_are_compact = true;
+ for (int i = 0; i < map_count; ++i) {
+ if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
+ all_are_compact = false;
+ break;
+ }
+ }
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ Label check_passed;
+ __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
- __ j(not_equal, &next, Label::kNear);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- __ jmp(&done, Label::kNear);
+ bool compact = all_are_compact ? true :
+ CompactEmit(instr->hydrogen()->types(), name, i, isolate());
+ __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
+ __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
@@ -2389,8 +2429,13 @@
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ Condition smi = __ CheckSmi(result);
+ DeoptimizeIf(NegateCondition(smi), instr->environment());
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
}
}
@@ -2880,7 +2925,7 @@
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -3072,7 +3117,7 @@
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
@@ -3614,7 +3659,7 @@
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@@ -3668,7 +3713,7 @@
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3748,7 +3793,7 @@
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->TempAt(0));
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
} else {
@@ -3790,6 +3835,10 @@
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(input);
+ }
}
__ SmiToInteger32(input, input);
}
@@ -3908,7 +3957,7 @@
ASSERT(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiToInteger32(input_reg, input_reg);
__ bind(deferred->exit());
@@ -4162,7 +4211,8 @@
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
@@ -4784,7 +4834,7 @@
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 1f5382a..99e7ec8 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -53,20 +53,20 @@
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- jump_table_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, zone),
+ jump_table_(4, zone),
+ deoptimization_literals_(8, zone),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(zone),
- deferred_(8),
+ deferred_(8, zone),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
+ zone_(zone),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- zone_(zone) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -160,7 +160,7 @@
void Abort(const char* format, ...);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -301,7 +301,8 @@
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
@@ -346,13 +347,13 @@
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+ Zone* zone_;
+
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
- Zone* zone_;
-
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index 877ea8c..22183a2 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -36,7 +36,7 @@
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32) {}
+ : cgen_(owner), moves_(32, owner->zone()) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
@@ -74,7 +74,7 @@
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 048169c..d06a6a4 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -369,9 +369,9 @@
// stack slots for int32 values.
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
+ return LStackSlot::Create(index, zone());
}
}
@@ -467,23 +467,23 @@
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
- instructions_.Add(gap);
+ instructions_.Add(gap, zone());
index = instructions_.length();
- instructions_.Add(instr);
+ instructions_.Add(instr, zone());
} else {
index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
+ instructions_.Add(instr, zone());
+ instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
+ pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
+ return LConstantOperand::Create(constant->id(), zone());
}
@@ -522,7 +522,8 @@
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+ GetGapAt(index)->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(from, to, zone());
}
@@ -757,7 +758,7 @@
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -1594,7 +1595,7 @@
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
- LDateField* result = new LDateField(object, instr->index());
+ LDateField* result = new(zone()) LDateField(object, instr->index());
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1643,14 +1644,13 @@
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- if (needs_check) {
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+ } else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
@@ -2103,7 +2103,7 @@
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new LAllocateObject(TempRegister());
+ LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 6efec00..d038dda 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -333,8 +333,11 @@
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos,
+ Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -2161,13 +2164,13 @@
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph)
+ LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) { }
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
@@ -2212,9 +2215,11 @@
}
void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
+ inlined_closures_.Add(closure, zone());
}
+ Zone* zone() const { return graph_->zone(); }
+
private:
int spill_slot_count_;
CompilationInfo* info_;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 95b43f4..7d5d6d3 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2760,7 +2760,7 @@
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
+ current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
j(equal, early_success, Label::kNear);
Cmp(FieldOperand(obj, HeapObject::kMapOffset),
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index d6936ae..a72a0a0 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -117,10 +117,12 @@
RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
Mode mode,
- int registers_to_save)
- : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(Isolate::Current(), NULL, kRegExpCodeSize),
no_root_array_scope_(&masm_),
- code_relative_fixup_positions_(4),
+ code_relative_fixup_positions_(4, zone),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 9b26069..a082cf2 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -41,7 +41,7 @@
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerX64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerX64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@@ -240,7 +240,7 @@
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_.pc_offset());
+ code_relative_fixup_positions_.Add(masm_.pc_offset(), zone());
}
void FixupCodeRelativePositions();
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 2cfe17b..a6acd97 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -731,10 +731,22 @@
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
@@ -743,7 +755,32 @@
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+ // We need an extra register; push name_reg so it can be reused below.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
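A hedged illustration of the hazard the prototype walk above defends against (script shape only; names illustrative):

    // Sketch: if "x" has an inherited setter (or is read-only) anywhere on
    // the prototype chain, a transitioning field store compiled without the
    // CheckPrototypes() call above could add an own property and silently
    // bypass it, so such stores must fall through to the miss label.
    static const char* kGuardedStoreExample =
        "function C() {}\n"
        "C.prototype.__defineSetter__('x', function(v) { this.x_ = v; });\n"
        "var o = new C();\n"
        "o.x = 1;  // must invoke the inherited setter, not add a field\n";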
@@ -754,11 +791,11 @@
if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ pop(scratch); // Return address.
+ __ pop(scratch1); // Return address.
__ push(receiver_reg);
__ Push(transition);
__ push(rax);
- __ push(scratch);
+ __ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
@@ -769,14 +806,14 @@
if (!transition.is_null()) {
// Update the map of the object.
- __ Move(scratch, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch);
+ __ Move(scratch1, transition);
+ __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
- scratch,
+ scratch1,
name_reg,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -797,19 +834,19 @@
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch, offset), rax);
+ __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch1, offset), rax);
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
__ RecordWriteField(
- scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
}
// Return the value (register rax).
@@ -1110,8 +1147,9 @@
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
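This is where the new AccessorSignature surfaces in the stub compiler: the callback is only inlined as a followup when the receiver matches the expected type. A hedged embedder-side sketch (getter body and names illustrative):

    // Sketch: an accessor whose AccessorInfo carries an expected receiver
    // type; for non-matching receivers IsCompatibleReceiver() fails and
    // the interceptor followup is not compiled inline.
    static v8::Handle<v8::Value> PropGetter(v8::Local<v8::String> name,
                                            const v8::AccessorInfo& info) {
      return v8::Integer::New(42);  // illustrative value
    }

    void InstallSignedAccessor() {
      v8::HandleScope scope;
      v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
      v8::Handle<v8::AccessorSignature> signature =
          v8::AccessorSignature::New(templ);
      templ->InstanceTemplate()->SetAccessor(
          v8::String::New("prop"), PropGetter, 0, v8::Handle<v8::Value>(),
          v8::DEFAULT, v8::None, signature);
    }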
@@ -2321,7 +2359,13 @@
Label miss;
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ rdx, rcx, rbx, rdi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2380,6 +2424,52 @@
}
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<JSObject> receiver,
+ Handle<JSFunction> setter,
+ Handle<String> name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(rax);
+
+ // Call the JavaScript setter with the receiver and the value on the stack.
+ __ push(rdx);
+ __ push(rax);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(rax);
+
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> receiver,
Handle<String> name) {
@@ -2494,7 +2584,13 @@
__ j(not_equal, &miss);
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ rdx, rcx, rbx, rdi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2648,6 +2744,44 @@
}
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(rax, &miss);
+ CheckPrototypes(receiver, rax, holder, rbx, rdx, rdi, name, &miss);
+
+ {
+ FrameScope scope(masm(), StackFrame::INTERNAL);
+
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(rax);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
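CompileStoreViaSetter above and this CompileLoadViaGetter give monomorphic ICs a direct-call fast path for plain JavaScript accessors. A hedged sketch of script that would exercise both once the ICs specialize (illustrative only):

    // Sketch: hot monomorphic accesses to "x" can now be handled by the two
    // stubs, which call the JS functions directly inside an internal frame
    // instead of taking the runtime path on every access.
    static const char* kAccessorHotLoop =
        "var o = { get x()  { return this.x_; },\n"
        "          set x(v) { this.x_ = v; } };\n"
        "for (var i = 0; i < 100000; i++) { o.x = i; var y = o.x; }\n";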
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
diff --git a/src/zone-inl.h b/src/zone-inl.h
index b484208..d75e297 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -94,29 +94,13 @@
}
-// TODO(isolates): for performance reasons, this should be replaced with a new
-// operator that takes the zone in which the object should be
-// allocated.
-void* ZoneObject::operator new(size_t size) {
- return ZONE->New(static_cast<int>(size));
-}
-
void* ZoneObject::operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
}
inline void* ZoneAllocationPolicy::New(size_t size) {
- if (zone_) {
- return zone_->New(size);
- } else {
- return ZONE->New(size);
- }
-}
-
-
-template <typename T>
-void* ZoneList<T>::operator new(size_t size) {
- return ZONE->New(static_cast<int>(size));
+ ASSERT(zone_);
+ return zone_->New(size);
}
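With the zone-less overloads gone, every ZoneObject and ZoneList allocation now names its zone explicitly. A hedged before/after sketch (type names illustrative):

    // Sketch: SampleNode stands in for any ZoneObject subclass.
    class SampleNode : public ZoneObject {
     public:
      explicit SampleNode(int id) : id_(id) {}
     private:
      int id_;
    };

    void AllocateInZone(Zone* zone) {
      SampleNode* node = new(zone) SampleNode(1);    // was: new SampleNode(1)
      ZoneList<int>* list = new(zone) ZoneList<int>(4, zone);
      list->Add(7, zone);  // element growth also needs the explicit zone
      USE(node);
    }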
diff --git a/src/zone.h b/src/zone.h
index 699374e..1bc4984 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -148,7 +148,6 @@
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- INLINE(void* operator new(size_t size));
INLINE(void* operator new(size_t size, Zone* zone));
// Ideally, the delete operator should be private instead of
@@ -168,7 +167,7 @@
// structures to allocate themselves and their elements in the Zone.
struct ZoneAllocationPolicy {
public:
- explicit ZoneAllocationPolicy(Zone* zone = NULL) : zone_(zone) { }
+ explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
INLINE(void* New(size_t size));
INLINE(static void Delete(void *pointer)) { }
@@ -186,14 +185,13 @@
public:
// Construct a new ZoneList with the given capacity; the length is
// always zero. The capacity must be non-negative.
- explicit ZoneList(int capacity, Zone* zone = NULL)
+ ZoneList(int capacity, Zone* zone)
: List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) { }
INLINE(void* operator new(size_t size, Zone* zone));
- INLINE(void* operator new(size_t size));
// Construct a new ZoneList by copying the elements of the given ZoneList.
- explicit ZoneList(const ZoneList<T>& other, Zone* zone = NULL)
+ ZoneList(const ZoneList<T>& other, Zone* zone)
: List<T, ZoneAllocationPolicy>(other.length(),
ZoneAllocationPolicy(zone)) {
AddAll(other, ZoneAllocationPolicy(zone));
@@ -201,28 +199,28 @@
// We add some convenience wrappers so that we can pass in a Zone
// instead of a (less convenient) ZoneAllocationPolicy.
- INLINE(void Add(const T& element, Zone* zone = NULL)) {
+ INLINE(void Add(const T& element, Zone* zone)) {
List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
}
INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other,
- Zone* zone = NULL)) {
+ Zone* zone)) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
- INLINE(void AddAll(const Vector<T>& other, Zone* zone = NULL)) {
+ INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
- INLINE(void InsertAt(int index, const T& element, Zone* zone = NULL)) {
+ INLINE(void InsertAt(int index, const T& element, Zone* zone)) {
List<T, ZoneAllocationPolicy>::InsertAt(index, element,
ZoneAllocationPolicy(zone));
}
- INLINE(Vector<T> AddBlock(T value, int count, Zone* zone = NULL)) {
+ INLINE(Vector<T> AddBlock(T value, int count, Zone* zone)) {
return List<T, ZoneAllocationPolicy>::AddBlock(value, count,
ZoneAllocationPolicy(zone));
}
- INLINE(void Allocate(int length, Zone* zone = NULL)) {
+ INLINE(void Allocate(int length, Zone* zone)) {
List<T, ZoneAllocationPolicy>::Allocate(length, ZoneAllocationPolicy(zone));
}
- INLINE(void Initialize(int capacity, Zone* zone = NULL)) {
+ INLINE(void Initialize(int capacity, Zone* zone)) {
List<T, ZoneAllocationPolicy>::Initialize(capacity,
ZoneAllocationPolicy(zone));
}
@@ -263,8 +261,8 @@
template <typename Config>
class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
public:
- ZoneSplayTree()
- : SplayTree<Config, ZoneAllocationPolicy>() {}
+ explicit ZoneSplayTree(Zone* zone)
+ : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
~ZoneSplayTree();
};
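ZoneSplayTree follows the same pattern; a hedged usage sketch with a hypothetical Config type:

    // Sketch: the allocation policy is bound to an explicit zone at
    // construction time, so the default constructor is gone.
    template <typename Config>
    void BuildTree(Zone* zone) {
      ZoneSplayTree<Config> tree(zone);  // was: ZoneSplayTree<Config> tree;
      // ... Insert()/Find() now allocate their nodes in |zone| ...
    }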