Version 3.19.11
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@15012 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 4cdd77e..ed1e265 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -6112,6 +6112,25 @@
}
+void v8::ArrayBuffer::Neuter() {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ ApiCheck(obj->is_external(),
+ "v8::ArrayBuffer::Neuter",
+ "Only externalized ArrayBuffers can be neutered");
+ LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
+ ENTER_V8(isolate);
+
+ for (i::Handle<i::Object> array_obj(obj->weak_first_array(), isolate);
+ *array_obj != i::Smi::FromInt(0);) {
+ i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*array_obj));
+ typed_array->Neuter();
+ array_obj = i::handle(typed_array->weak_next(), isolate);
+ }
+ obj->Neuter();
+}
+
+
size_t v8::ArrayBuffer::ByteLength() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
@@ -6206,6 +6225,9 @@
obj->set_buffer(*buffer);
+ obj->set_weak_next(buffer->weak_first_array());
+ buffer->set_weak_first_array(*obj);
+
i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
static_cast<double>(byte_offset));
obj->set_byte_offset(*byte_offset_object);
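Note on the api.cc hunks: Neuter() first enforces, via ApiCheck, that the buffer has been externalized, then walks a singly linked list of views threaded through the typed arrays themselves: JSArrayBuffer::weak_first_array points at the head, each JSTypedArray::weak_next at the following view, and Smi 0 terminates the list. The second hunk is the producer side: the typed-array construction path pushes each new view onto the head of its buffer's list. A self-contained C++ analogy of the two operations, with illustrative types standing in for the V8 objects:

    // Illustrative analogy only; nullptr plays the role of Smi::FromInt(0).
    struct View { View* weak_next = nullptr; bool neutered = false; };
    struct Buffer { View* weak_first_array = nullptr; };

    void AddView(Buffer* buf, View* v) {   // cf. the second api.cc hunk
      v->weak_next = buf->weak_first_array;
      buf->weak_first_array = v;           // push onto the head of the list
    }

    void NeuterViews(Buffer* buf) {        // cf. v8::ArrayBuffer::Neuter()
      for (View* v = buf->weak_first_array; v != nullptr; v = v->weak_next) {
        v->neutered = true;  // the real code drops the view's backing store
      }
    }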
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index b9322b8..ee5517c 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1081,9 +1081,8 @@
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@@ -1259,6 +1258,65 @@
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(eq, loop_statement.break_label());
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
+ __ b(eq, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(r0, &convert);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &done_convert);
+ __ bind(&convert);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(r0);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
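VisitForOfStatement does not open-code the iteration protocol; the parser has already desugared for-of into the four sub-expressions carried by ForOfStatement (see the ast.h hunks below), and the generator only stitches them together with the null/undefined skip, the ToObject conversion, and the back-edge bookkeeping. The emitted control flow corresponds to this self-contained C++ sketch, where Result and Iterator are illustrative stand-ins for the JS iterator protocol:

    #include <cstdio>

    struct Result { int value; bool done; };

    struct Iterator {               // stand-in for iterable[@@iterator]()
      int i = 0, limit = 3;
      Result next() {
        if (i >= limit) return { 0, true };
        return { i++, false };
      }
    };

    int main() {
      Iterator it;                  // var iterator = iterable[@@iterator]()
      for (;;) {
        Result result = it.next(); // result = iterator.next()
        if (result.done) break;    // if (result.done) break;
        int each = result.value;   // each = result.value
        std::printf("%d\n", each); // loop body runs with each bound
      }
      return 0;
    }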
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5463a9a..548da2e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2354,10 +2354,11 @@
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
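The rewritten condition drops the Smi case and narrows the heap-object case: a deoptimization environment is attached only when the representation check can actually fail at run time. The predicate now amounts to this sketch (names taken from the hunk):

    // Sketch: environment needed only if the heap-object check can deopt.
    bool needs_environment =
        FLAG_track_heap_object_fields &&
        instr->field_representation().IsHeapObject() &&
        !instr->value()->type().IsHeapObject();  // statically proven: no check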
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index d1be92f..d1d4fe0 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -87,10 +87,8 @@
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
+ info()->CommitDependentMaps(code);
+
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
@@ -5399,11 +5397,7 @@
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index c7ccdfb..45e4af6 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -56,7 +56,6 @@
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -410,7 +409,6 @@
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 4d415ef..f05cba5 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -122,7 +122,7 @@
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
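The regexp assembler change removes a thread-local Isolate::Current() lookup in favor of the isolate already bound to the compilation's Zone; same object, but threaded explicitly:

    // Pattern applied by the hunk: prefer the explicitly available isolate
    // over a TLS lookup.
    MacroAssembler* masm =
        new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize);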
diff --git a/src/array.js b/src/array.js
index dcfcde5..5f89ebb 100644
--- a/src/array.js
+++ b/src/array.js
@@ -399,7 +399,7 @@
n--;
var value = this[n];
- EnqueueSpliceRecord(this, n, [value], 1, 0);
+ EnqueueSpliceRecord(this, n, [value], 0);
try {
BeginPerformSplice(this);
@@ -441,7 +441,7 @@
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
- EnqueueSpliceRecord(this, n, [], 0, m);
+ EnqueueSpliceRecord(this, n, [], m);
try {
BeginPerformSplice(this);
@@ -581,7 +581,7 @@
function ObservedArrayShift(len) {
var first = this[0];
- EnqueueSpliceRecord(this, 0, [first], 1, 0);
+ EnqueueSpliceRecord(this, 0, [first], 0);
try {
BeginPerformSplice(this);
@@ -627,7 +627,7 @@
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- EnqueueSpliceRecord(this, 0, [], 0, num_arguments);
+ EnqueueSpliceRecord(this, 0, [], num_arguments);
try {
BeginPerformSplice(this);
@@ -779,7 +779,6 @@
EnqueueSpliceRecord(this,
start_i,
deleted_elements.slice(),
- deleted_elements.length,
num_elements_to_add);
}
}
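All five call sites drop the explicit delete-count argument: EnqueueSpliceRecord now derives the number of removed elements from the removed-elements array itself, so passing both was redundant and a place for the two values to disagree. The record's shape, as a C++ analogy (field names are illustrative):

    #include <vector>

    // Analogy: the removed count is a property of the removed array, not a
    // separately passed argument.
    struct SpliceRecord {
      int index;
      std::vector<int> removed;
      int added_count;
      size_t removed_count() const { return removed.size(); }
    };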
diff --git a/src/ast.cc b/src/ast.cc
index b4c0430..a5d1e2d 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -655,17 +655,15 @@
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this);
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
- elements_kind_ = oracle->GetCallNewElementsKind(this);
+ Object* value = allocation_info_cell_->value();
+ if (value->IsSmi()) {
+ elements_kind_ = static_cast<ElementsKind>(Smi::cast(value)->value());
+ }
}
- Handle<Object> alloc_elements_kind = oracle->GetInfo(CallNewFeedbackId());
-// if (alloc_elements_kind->IsSmi())
-// alloc_elements_kind_ = Handle<Smi>::cast(alloc_elements_kind);
- alloc_elements_kind_ = alloc_elements_kind->IsSmi()
- ? Handle<Smi>::cast(alloc_elements_kind)
- : handle(Smi::FromInt(GetInitialFastElementsKind()), oracle->isolate());
}
@@ -1156,6 +1154,7 @@
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
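CallNew::RecordTypeFeedback now caches the JSGlobalPropertyCell itself instead of snapshotting its Smi payload into alloc_elements_kind_. Because the cell is the one full-codegen writes through, elements-kind transitions recorded after the feedback is read are not lost, which is what the TODO removed from hydrogen.cc further down asked for. The consumer side reduces to this sketch:

    // Sketch of the consumer (cf. the hydrogen.cc hunk below).
    Handle<JSGlobalPropertyCell> cell = expr->allocation_info_cell();
    Object* value = cell->value();
    if (value->IsSmi()) {  // the cell stores an ElementsKind as a Smi
      ElementsKind kind =
          static_cast<ElementsKind>(Smi::cast(value)->value());
    }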
diff --git a/src/ast.h b/src/ast.h
index 2ffa473..219a69b 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -90,6 +90,7 @@
V(WhileStatement) \
V(ForStatement) \
V(ForInStatement) \
+ V(ForOfStatement) \
V(TryCatchStatement) \
V(TryFinallyStatement) \
V(DebuggerStatement)
@@ -874,50 +875,132 @@
};
-class ForInStatement: public IterationStatement {
+class ForEachStatement: public IterationStatement {
public:
- DECLARE_NODE_TYPE(ForInStatement)
+ enum VisitMode {
+ ENUMERATE, // for (each in subject) body;
+ ITERATE // for (each of subject) body;
+ };
- void Initialize(Expression* each, Expression* enumerable, Statement* body) {
+ void Initialize(Expression* each, Expression* subject, Statement* body) {
IterationStatement::Initialize(body);
each_ = each;
- enumerable_ = enumerable;
- for_in_type_ = SLOW_FOR_IN;
+ subject_ = subject;
}
Expression* each() const { return each_; }
- Expression* enumerable() const { return enumerable_; }
+ Expression* subject() const { return subject_; }
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
- BailoutId BodyId() const { return body_id_; }
- BailoutId PrepareId() const { return prepare_id_; }
+ protected:
+ ForEachStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ each_(NULL),
+ subject_(NULL) {
+ }
+
+ private:
+ Expression* each_;
+ Expression* subject_;
+};
+
+
+class ForInStatement: public ForEachStatement {
+ public:
+ DECLARE_NODE_TYPE(ForInStatement)
+
+ Expression* enumerable() const {
+ return subject();
+ }
TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
+ BailoutId BodyId() const { return body_id_; }
+ BailoutId PrepareId() const { return prepare_id_; }
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+
protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- each_(NULL),
- enumerable_(NULL),
+ : ForEachStatement(isolate, labels),
+ for_in_type_(SLOW_FOR_IN),
body_id_(GetNextId(isolate)),
prepare_id_(GetNextId(isolate)) {
}
- private:
- Expression* each_;
- Expression* enumerable_;
-
ForInType for_in_type_;
-
const BailoutId body_id_;
const BailoutId prepare_id_;
};
+class ForOfStatement: public ForEachStatement {
+ public:
+ DECLARE_NODE_TYPE(ForOfStatement)
+
+ void Initialize(Expression* each,
+ Expression* subject,
+ Statement* body,
+ Expression* assign_iterator,
+ Expression* next_result,
+ Expression* result_done,
+ Expression* assign_each) {
+ ForEachStatement::Initialize(each, subject, body);
+ assign_iterator_ = assign_iterator;
+ next_result_ = next_result;
+ result_done_ = result_done;
+ assign_each_ = assign_each;
+ }
+
+ Expression* iterable() const {
+ return subject();
+ }
+
+ // var iterator = iterable;
+ Expression* assign_iterator() const {
+ return assign_iterator_;
+ }
+
+ // var result = iterator.next();
+ Expression* next_result() const {
+ return next_result_;
+ }
+
+ // result.done
+ Expression* result_done() const {
+ return result_done_;
+ }
+
+ // each = result.value
+ Expression* assign_each() const {
+ return assign_each_;
+ }
+
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return BackEdgeId(); }
+
+ BailoutId BackEdgeId() const { return back_edge_id_; }
+
+ protected:
+ ForOfStatement(Isolate* isolate, ZoneStringList* labels)
+ : ForEachStatement(isolate, labels),
+ assign_iterator_(NULL),
+ next_result_(NULL),
+ result_done_(NULL),
+ assign_each_(NULL),
+ back_edge_id_(GetNextId(isolate)) {
+ }
+
+ Expression* assign_iterator_;
+ Expression* next_result_;
+ Expression* result_done_;
+ Expression* assign_each_;
+ const BailoutId back_edge_id_;
+};
+
+
class ExpressionStatement: public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
@@ -1682,7 +1765,9 @@
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
- Handle<Smi> allocation_elements_kind() const { return alloc_elements_kind_; }
+ Handle<JSGlobalPropertyCell> allocation_info_cell() const {
+ return allocation_info_cell_;
+ }
BailoutId ReturnId() const { return return_id_; }
@@ -1707,7 +1792,7 @@
bool is_monomorphic_;
Handle<JSFunction> target_;
ElementsKind elements_kind_;
- Handle<Smi> alloc_elements_kind_;
+ Handle<JSGlobalPropertyCell> allocation_info_cell_;
const BailoutId return_id_;
};
@@ -2853,10 +2938,25 @@
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
- STATEMENT_WITH_LABELS(ForInStatement)
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
+ ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+ ZoneStringList* labels) {
+ switch (visit_mode) {
+ case ForEachStatement::ENUMERATE: {
+ ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels);
+ VISIT_AND_RETURN(ForInStatement, stmt);
+ }
+ case ForEachStatement::ITERATE: {
+ ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels);
+ VISIT_AND_RETURN(ForOfStatement, stmt);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
VISIT_AND_RETURN(ModuleStatement, stmt)
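With ForEachStatement as the shared base, the parser builds either loop through one factory entry point, picking the node type from the keyword. A hedged parser-side sketch, where is_of and labels are illustrative locals:

    // 'for (x of y)' vs. 'for (x in y)': illustrative dispatch only.
    ForEachStatement::VisitMode mode =
        is_of ? ForEachStatement::ITERATE : ForEachStatement::ENUMERATE;
    ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);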
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 5ef8d30..a51a9b1 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1086,11 +1086,13 @@
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
- factory->undefined_value(), DONT_ENUM));
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
- factory->undefined_value(), DONT_ENUM));
+ factory->undefined_value(), DONT_ENUM,
+ Object::FORCE_TAGGED));
#ifdef DEBUG
LookupResult lookup(isolate);
@@ -1322,8 +1324,7 @@
Handle<JSFunction> array_buffer_fun =
InstallFunction(
global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSize +
- v8::ArrayBuffer::kInternalFieldCount * kPointerSize,
+ JSArrayBuffer::kSizeWithInternalFields,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
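Two small cleanups here: the arguments boilerplate's length and callee properties are installed with Object::FORCE_TAGGED (presumably to keep those fields in tagged representation despite field-type tracking, since they are later overwritten with arbitrary values; the hunk itself does not say), and the open-coded ArrayBuffer instance size is replaced by a named constant. Assuming kSizeWithInternalFields is defined as the same sum, which the replacement implies:

    // Hedged equivalence implied by the hunk:
    static_assert(JSArrayBuffer::kSizeWithInternalFields ==
                      JSArrayBuffer::kSize +
                      v8::ArrayBuffer::kInternalFieldCount * kPointerSize,
                  "named constant matches the old open-coded size");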
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 4c6ee86..b4479da 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -305,6 +305,27 @@
}
+InlineCacheState ICCompareStub::GetICState() {
+ CompareIC::State state = Max(left_, right_);
+ switch (state) {
+ case CompareIC::UNINITIALIZED:
+ return ::v8::internal::UNINITIALIZED;
+ case CompareIC::SMI:
+ case CompareIC::NUMBER:
+ case CompareIC::INTERNALIZED_STRING:
+ case CompareIC::STRING:
+ case CompareIC::UNIQUE_NAME:
+ case CompareIC::OBJECT:
+ case CompareIC::KNOWN_OBJECT:
+ return MONOMORPHIC;
+ case CompareIC::GENERIC:
+ return ::v8::internal::GENERIC;
+ }
+ UNREACHABLE();
+ return ::v8::internal::UNINITIALIZED;
+}
+
+
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
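GetICState combines the two operand states with Max(), which works because CompareIC::State is ordered from least to most general; a mixed pair collapses to its more general side before being bucketed into UNINITIALIZED, MONOMORPHIC, or GENERIC. A reduced illustration:

    #include <algorithm>

    // Illustrative lattice: enum order encodes "more general than".
    enum State { UNINITIALIZED, SMI, NUMBER, STRING, OBJECT, GENERIC };

    State Combine(State left, State right) {
      return std::max(left, right);  // Max(left_, right_) in the stub
    }
    // Combine(SMI, GENERIC) == GENERIC, so the stub reports GENERIC rather
    // than MONOMORPHIC for that pair.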
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ad1f09b..f4e2b7d 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -1080,6 +1080,8 @@
return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
+ virtual InlineCacheState GetICState();
+
private:
class OpField: public BitField<int, 0, 3> { };
class LeftStateField: public BitField<int, 3, 4> { };
@@ -1205,7 +1207,7 @@
}
static byte ExtractTypesFromExtraICState(
Code::ExtraICState state) {
- return state & ((1<<NUMBER_OF_TYPES)-1);
+ return state & ((1 << NUMBER_OF_TYPES) - 1);
}
void Record(Handle<Object> object);
@@ -2027,6 +2029,13 @@
return types_.ToIntegral();
}
+ virtual InlineCacheState GetICState() {
+ if (types_.IsEmpty()) {
+ return ::v8::internal::UNINITIALIZED;
+ } else {
+ return MONOMORPHIC;
+ }
+ }
private:
Major MajorKey() { return ToBoolean; }
diff --git a/src/compiler.cc b/src/compiler.cc
index 5320fc9..c6b911f 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -106,6 +106,9 @@
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ dependent_maps_[i] = NULL;
+ }
if (mode == STUB) {
mode_ = STUB;
return;
@@ -125,6 +128,41 @@
CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
delete no_frame_ranges_;
+#ifdef DEBUG
+ // Check that no dependent maps have been added, or that any added
+ // dependent maps have been rolled back or committed.
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ASSERT_EQ(NULL, dependent_maps_[i]);
+ }
+#endif // DEBUG
+}
+
+
+void CompilationInfo::CommitDependentMaps(Handle<Code> code) {
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
+ if (group_maps == NULL) continue;
+ ASSERT(!object_wrapper_.is_null());
+ for (int j = 0; j < group_maps->length(); j++) {
+ group_maps->at(j)->dependent_code()->UpdateToFinishedCode(
+ static_cast<DependentCode::DependencyGroup>(i), this, *code);
+ }
+ dependent_maps_[i] = NULL; // Zone-allocated, no need to delete.
+ }
+}
+
+
+void CompilationInfo::RollbackDependentMaps() {
+ // Unregister from all dependent maps if not yet committed.
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
+ if (group_maps == NULL) continue;
+ for (int j = 0; j < group_maps->length(); j++) {
+ group_maps->at(j)->dependent_code()->RemoveCompilationInfo(
+ static_cast<DependentCode::DependencyGroup>(i), this);
+ }
+ dependent_maps_[i] = NULL; // Zone-allocated, no need to delete.
+ }
}
@@ -950,9 +988,6 @@
if (status == OptimizingCompiler::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
- // Do a scavenge to put off the next scavenge as far as possible.
- // This may ease the issue that GVN blocks the next scavenge.
- isolate->heap()->CollectGarbage(NEW_SPACE, "parallel recompile");
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
@@ -985,7 +1020,7 @@
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
- info->SetCode(Handle<Code>(info->shared_info()->code()));
+ info->AbortOptimization();
InstallFullCode(*info);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** aborting optimization for ");
@@ -1003,9 +1038,11 @@
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
- if (status != OptimizingCompiler::SUCCEEDED) {
- optimizing_compiler->info()->set_bailout_reason(
- "failed/bailed out last time");
+ if (info->HasAbortedDueToDependentMap()) {
+ info->set_bailout_reason("bailed out due to dependent map");
+ status = optimizing_compiler->AbortOptimization();
+ } else if (status != OptimizingCompiler::SUCCEEDED) {
+ info->set_bailout_reason("failed/bailed out last time");
status = optimizing_compiler->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
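The compiler.cc and compiler.h hunks establish a register/commit/rollback protocol for map dependencies of in-flight (possibly parallel) compilations: during graph building, each map records the CompilationInfo (wrapped in a Foreign via object_wrapper()); on success CommitDependentMaps swaps that entry for the finished Code, and on any abort RollbackDependentMaps unregisters it, with ~CompilationInfoWithZone performing the rollback automatically. A hedged sketch of the lifecycle, where closure, code, and compiled_ok are illustrative:

    {
      CompilationInfoWithZone info(closure);
      // Graph building registers dependencies, e.g. HCheckPrototypeMaps:
      //   map->AddDependentCompilationInfo(
      //       DependentCode::kPrototypeCheckGroup, &info);
      if (compiled_ok) {
        info.CommitDependentMaps(code);  // dependency now points at the Code
      }
      // Otherwise ~CompilationInfoWithZone() runs RollbackDependentMaps(),
      // removing info from every map it registered with.
    }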
diff --git a/src/compiler.h b/src/compiler.h
index 8e6d295..f53feb9 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -57,12 +57,8 @@
// is constructed based on the resources available at compile-time.
class CompilationInfo {
public:
- CompilationInfo(Handle<Script> script, Zone* zone);
- CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
- CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
-
- ~CompilationInfo();
+ virtual ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
@@ -243,6 +239,17 @@
deferred_handles_ = deferred_handles;
}
+ ZoneList<Handle<Map> >* dependent_maps(DependentCode::DependencyGroup group) {
+ if (dependent_maps_[group] == NULL) {
+ dependent_maps_[group] = new(zone_) ZoneList<Handle<Map> >(2, zone_);
+ }
+ return dependent_maps_[group];
+ }
+
+ void CommitDependentMaps(Handle<Code> code);
+
+ void RollbackDependentMaps();
+
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
@@ -276,6 +283,26 @@
return result;
}
+ Handle<Foreign> object_wrapper() {
+ if (object_wrapper_.is_null()) {
+ object_wrapper_ =
+ isolate()->factory()->NewForeign(reinterpret_cast<Address>(this));
+ }
+ return object_wrapper_;
+ }
+
+ void AbortDueToDependentMap() {
+ mode_ = DEPENDENT_MAP_ABORT;
+ }
+
+ bool HasAbortedDueToDependentMap() {
+ return mode_ == DEPENDENT_MAP_ABORT;
+ }
+
+ protected:
+ CompilationInfo(Handle<Script> script, Zone* zone);
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
+ CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
private:
Isolate* isolate_;
@@ -289,7 +316,8 @@
BASE,
OPTIMIZE,
NONOPT,
- STUB
+ STUB,
+ DEPENDENT_MAP_ABORT
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
@@ -369,6 +397,8 @@
DeferredHandles* deferred_handles_;
+ ZoneList<Handle<Map> >* dependent_maps_[DependentCode::kGroupCount];
+
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
@@ -387,6 +417,8 @@
// during graph optimization.
int opt_count_;
+ Handle<Foreign> object_wrapper_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -407,11 +439,18 @@
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
- explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_),
zone_(isolate),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ // Virtual destructor because a CompilationInfoWithZone has to exit the
+ // zone scope and get rid of its dependent maps even when it is deleted
+ // through a CompilationInfo pointer.
+ virtual ~CompilationInfoWithZone() {
+ RollbackDependentMaps();
+ }
+
private:
Zone zone_;
ZoneScope zone_scope_;
diff --git a/src/factory.cc b/src/factory.cc
index f963334..c47b57d 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -41,6 +41,14 @@
namespace internal {
+Handle<Box> Factory::NewBox(Handle<Object> value, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateBox(*value, pretenure),
+ Box);
+}
+
+
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
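Box (allocated by AllocateBox in the heap.cc hunk below) is a one-field struct: a heap cell holding a single tagged value that can be shared and updated through one slot. A hedged usage sketch, assuming a Factory* factory and an Isolate* isolate in scope:

    Handle<Box> box =
        factory->NewBox(handle(Smi::FromInt(0), isolate));  // NOT_TENURED
    box->set_value(Smi::FromInt(42));  // set_value per the AllocateBox hunk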
diff --git a/src/factory.h b/src/factory.h
index 66304a9..d59d742 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -39,6 +39,11 @@
class Factory {
public:
+ // Allocate a new boxed value.
+ Handle<Box> NewBox(
+ Handle<Object> value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocate a new uninitialized fixed array.
Handle<FixedArray> NewFixedArray(
int size,
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0a6e576..49dac4a 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -170,6 +170,7 @@
"enable harmony array buffer")
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
+DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@@ -178,6 +179,7 @@
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
+DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// TODO[dslomov] add harmony => harmony_typed_arrays
@@ -199,8 +201,10 @@
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
+DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
+DEFINE_implication(track_computed_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
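The new --harmony_iteration flag gates for-of support and is pulled in by the umbrella --harmony flag; DEFINE_implication behaves roughly like:

    // Rough expansion of DEFINE_implication(harmony, harmony_iteration):
    if (FLAG_harmony) FLAG_harmony_iteration = true;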
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index ad2a994..fe3c43f 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -163,6 +163,12 @@
}
+void BreakableStatementChecker::VisitForOfStatement(ForOfStatement* stmt) {
+ // For-of is breakable because of the next() call.
+ is_breakable_ = true;
+}
+
+
void BreakableStatementChecker::VisitTryCatchStatement(
TryCatchStatement* stmt) {
// Mark try catch as breakable to avoid adding a break slot in front of it.
diff --git a/src/heap.cc b/src/heap.cc
index 6a89efd..2817fcb 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -180,6 +180,7 @@
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
native_contexts_list_ = NULL;
+ array_buffers_list_ = Smi::FromInt(0);
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
@@ -1539,11 +1540,6 @@
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* undefined = undefined_value();
- Object* head = undefined;
- Context* tail = NULL;
- Object* candidate = native_contexts_list_;
-
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete mark-compact cycle.
// Note that write barrier has no effect if we are already in the middle of
@@ -1551,6 +1547,16 @@
bool record_slots =
gc_state() == MARK_COMPACT &&
mark_compact_collector()->is_compacting();
+ ProcessArrayBuffers(retainer, record_slots);
+ ProcessNativeContexts(retainer, record_slots);
+}
+
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* undefined = undefined_value();
+ Object* head = undefined;
+ Context* tail = NULL;
+ Object* candidate = native_contexts_list_;
while (candidate != undefined) {
// Check whether to keep the candidate in the list.
@@ -1619,6 +1625,101 @@
}
+template <class T>
+struct WeakListVisitor;
+
+
+template <class T>
+static Object* VisitWeakList(Object* list,
+ MarkCompactCollector* collector,
+ WeakObjectRetainer* retainer, bool record_slots) {
+ Object* head = Smi::FromInt(0);
+ T* tail = NULL;
+ while (list != Smi::FromInt(0)) {
+ Object* retained = retainer->RetainAs(list);
+ if (retained != NULL) {
+ if (head == Smi::FromInt(0)) {
+ head = retained;
+ } else {
+ ASSERT(tail != NULL);
+ WeakListVisitor<T>::set_weak_next(tail, retained);
+ if (record_slots) {
+ Object** next_slot =
+ HeapObject::RawField(tail, WeakListVisitor<T>::kWeakNextOffset);
+ collector->RecordSlot(next_slot, next_slot, retained);
+ }
+ }
+ tail = reinterpret_cast<T*>(retained);
+ WeakListVisitor<T>::VisitLiveObject(
+ tail, collector, retainer, record_slots);
+ }
+ list = WeakListVisitor<T>::get_weak_next(reinterpret_cast<T*>(list));
+ }
+ if (tail != NULL) {
+ tail->set_weak_next(Smi::FromInt(0));
+ }
+ return head;
+}
+
+
+template<>
+struct WeakListVisitor<JSTypedArray> {
+ static void set_weak_next(JSTypedArray* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* get_weak_next(JSTypedArray* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(JSTypedArray* obj,
+ MarkCompactCollector* collector,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {}
+
+ static const int kWeakNextOffset = JSTypedArray::kWeakNextOffset;
+};
+
+
+template<>
+struct WeakListVisitor<JSArrayBuffer> {
+ static void set_weak_next(JSArrayBuffer* obj, Object* next) {
+ obj->set_weak_next(next);
+ }
+
+ static Object* get_weak_next(JSArrayBuffer* obj) {
+ return obj->weak_next();
+ }
+
+ static void VisitLiveObject(JSArrayBuffer* array_buffer,
+ MarkCompactCollector* collector,
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* typed_array_obj =
+ VisitWeakList<JSTypedArray>(array_buffer->weak_first_array(),
+ collector, retainer, record_slots);
+ array_buffer->set_weak_first_array(typed_array_obj);
+ if (typed_array_obj != Smi::FromInt(0) && record_slots) {
+ Object** slot = HeapObject::RawField(
+ array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
+ collector->RecordSlot(slot, slot, typed_array_obj);
+ }
+ }
+
+ static const int kWeakNextOffset = JSArrayBuffer::kWeakNextOffset;
+};
+
+
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
+ bool record_slots) {
+ Object* array_buffer_obj =
+ VisitWeakList<JSArrayBuffer>(array_buffers_list(),
+ mark_compact_collector(),
+ retainer, record_slots);
+ set_array_buffers_list(array_buffer_obj);
+}
+
+
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
@@ -1794,6 +1895,14 @@
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
+ table_.Register(kVisitJSArrayBuffer,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
+ table_.Register(kVisitJSTypedArray,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ Visit);
+
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -2701,6 +2810,15 @@
}
+MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
+ Box* result;
+ MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->set_value(value);
+ return result;
+}
+
+
MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
@@ -2845,6 +2963,13 @@
}
set_the_hole_value(Oddball::cast(obj));
+ { MaybeObject* maybe_obj = CreateOddball("uninitialized",
+ Smi::FromInt(-1),
+ Oddball::kUninitialized);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_uninitialized_value(Oddball::cast(obj));
+
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Smi::FromInt(-4),
Oddball::kArgumentMarker);
diff --git a/src/heap.h b/src/heap.h
index 6d04454..65deb1a 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -59,6 +59,7 @@
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
+ V(Oddball, uninitialized_value, UninitializedValue) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
@@ -295,7 +296,8 @@
V(send_string, "send") \
V(throw_string, "throw") \
V(done_string, "done") \
- V(value_string, "value")
+ V(value_string, "value") \
+ V(next_string, "next")
// Forward declarations.
class GCTracer;
@@ -938,6 +940,10 @@
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
+ // Allocate Box.
+ MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
+ PretenureFlag pretenure);
+
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -1347,6 +1353,12 @@
}
Object* native_contexts_list() { return native_contexts_list_; }
+ void set_array_buffers_list(Object* object) {
+ array_buffers_list_ = object;
+ }
+ Object* array_buffers_list() { return array_buffers_list_; }
+
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -2017,6 +2029,8 @@
Object* native_contexts_list_;
+ Object* array_buffers_list_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2160,6 +2174,9 @@
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
+ void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
+
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index d3f1a9e..073f7a1 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1058,6 +1058,7 @@
block()->graph()->GetInvalidContext(), current_index, add_offset);
add->InsertBefore(this);
add->AssumeRepresentation(index()->representation());
+ add->ClearFlag(kCanOverflow);
current_index = add;
}
@@ -1308,6 +1309,30 @@
}
+Range* HUnaryMathOperation::InferRange(Zone* zone) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32() && value()->HasRange()) {
+ if (op() == kMathAbs) {
+ int upper = value()->range()->upper();
+ int lower = value()->range()->lower();
+ bool spans_zero = value()->range()->CanBeZero();
+ // Math.abs(kMinInt) overflows its representation, in which case the
+ // instruction deopts. Hence clamp the bound to kMaxInt.
+ int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
+ int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
+ Range* result =
+ new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
+ Max(abs_lower, abs_upper));
+ // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
+ // Smi::kMaxValue.
+ if (r.IsSmi()) result->ClampToSmi();
+ return result;
+ }
+ }
+ return HValue::InferRange(zone);
+}
+
+
void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
const char* name = OpName();
stream->Add("%s ", name);
@@ -3068,6 +3093,16 @@
}
+Representation HUnaryMathOperation::RepresentationFromInputs() {
+ Representation rep = representation();
+ // If the actual input representation is more general than what we have
+ // so far, but is not Tagged, use that representation instead.
+ Representation input_rep = value()->representation();
+ if (!input_rep.IsTagged()) rep = rep.generalize(input_rep);
+ return rep;
+}
+
+
HType HStringCharFromCode::CalculateInferredType() {
return HType::String();
}
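The new InferRange gives Math.abs a tight range instead of the generic fallback; for Smi representation the result is additionally clamped via the new Range::ClampToSmi. Worked through: an input range [-5, 3] that can be zero yields [0, 5]; [kMinInt, -1] yields [1, kMaxInt], because abs(kMinInt) is unrepresentable and the instruction deopts there, so the bound clamps to kMaxInt. A checkable C++ mirror of the rule:

    #include <algorithm>
    #include <climits>
    #include <cstdlib>

    // Mirror of the Math.abs range rule; INT_MIN/INT_MAX ~ kMinInt/kMaxInt.
    void AbsRange(int lower, int upper, bool can_be_zero,
                  int* out_lower, int* out_upper) {
      int abs_upper = (upper == INT_MIN) ? INT_MAX : std::abs(upper);
      int abs_lower = (lower == INT_MIN) ? INT_MAX : std::abs(lower);
      *out_lower = can_be_zero ? 0 : std::min(abs_lower, abs_upper);
      *out_upper = std::max(abs_lower, abs_upper);
    }
    // AbsRange(-5, 3, true, ...)        -> [0, 5]
    // AbsRange(INT_MIN, -1, false, ...) -> [1, INT_MAX]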
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 4a8a406..6b9ff0c 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -271,6 +271,10 @@
bool IsInSmiRange() const {
return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
}
+ void ClampToSmi() {
+ lower_ = Max(lower_, Smi::kMinValue);
+ upper_ = Min(upper_, Smi::kMaxValue);
+ }
void KeepOrder();
#ifdef DEBUG
void Verify() const;
@@ -2644,7 +2648,10 @@
}
}
+ virtual Range* InferRange(Zone* zone);
+
virtual HValue* Canonicalize();
+ virtual Representation RepresentationFromInputs();
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2959,21 +2966,33 @@
public:
HCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder,
- Zone* zone)
+ Zone* zone,
+ CompilationInfo* info)
: prototypes_(2, zone),
maps_(2, zone),
first_prototype_unique_id_(),
- last_prototype_unique_id_() {
+ last_prototype_unique_id_(),
+ can_omit_prototype_maps_(true) {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
// Keep a list of all objects on the prototype chain up to the holder
// and the expected maps.
while (true) {
prototypes_.Add(prototype, zone);
- maps_.Add(Handle<Map>(prototype->map()), zone);
+ Handle<Map> map(prototype->map());
+ maps_.Add(map, zone);
+ can_omit_prototype_maps_ &= map->CanOmitPrototypeChecks();
if (prototype.is_identical_to(holder)) break;
prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
}
+ if (can_omit_prototype_maps_) {
+ // Mark in-flight compilation as dependent on those maps.
+ for (int i = 0; i < maps()->length(); i++) {
+ Handle<Map> map = maps()->at(i);
+ map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
+ info);
+ }
+ }
}
ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
@@ -2998,12 +3017,7 @@
last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
}
- bool CanOmitPrototypeChecks() {
- for (int i = 0; i < maps()->length(); i++) {
- if (!maps()->at(i)->CanOmitPrototypeChecks()) return false;
- }
- return true;
- }
+ bool CanOmitPrototypeChecks() { return can_omit_prototype_maps_; }
protected:
virtual bool DataEquals(HValue* other) {
@@ -3017,6 +3031,7 @@
ZoneList<Handle<Map> > maps_;
UniqueValueId first_prototype_unique_id_;
UniqueValueId last_prototype_unique_id_;
+ bool can_omit_prototype_maps_;
};
@@ -3653,9 +3668,6 @@
virtual Representation RequiredInputRepresentation(int arg_index) {
return representation();
}
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
virtual bool IsRelationTrueInternal(NumericRelation relation,
HValue* related_value,
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index d1cfb8e..42f5dad 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -527,6 +527,7 @@
void HGraph::Verify(bool do_full_verify) const {
Heap::RelocationLock(isolate()->heap());
+ AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@@ -3810,7 +3811,7 @@
void HOptimizedGraphBuilder::Bailout(const char* reason) {
- info()->set_bailout_reason(reason);
+ current_info()->set_bailout_reason(reason);
SetStackOverflow();
}
@@ -3867,11 +3868,11 @@
bool HOptimizedGraphBuilder::BuildGraph() {
- if (info()->function()->is_generator()) {
+ if (current_info()->function()->is_generator()) {
Bailout("function is a generator");
return false;
}
- Scope* scope = info()->scope();
+ Scope* scope = current_info()->scope();
if (scope->HasIllegalRedeclaration()) {
Bailout("function with illegal redeclaration");
return false;
@@ -3915,7 +3916,7 @@
AddInstruction(
new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
- VisitStatements(info()->function()->body());
+ VisitStatements(current_info()->function()->body());
if (HasStackOverflow()) return false;
if (current_block() != NULL) {
@@ -3927,7 +3928,7 @@
// last time this function was compiled, then this recompile is likely not
// due to missing/inadequate type feedback, but rather too aggressive
// optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
+ Handle<Code> unoptimized_code(current_info()->shared_info()->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Handle<TypeFeedbackInfo> type_info(
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
@@ -4114,7 +4115,7 @@
static BoundsCheckKey* Create(Zone* zone,
HBoundsCheck* check,
int32_t* offset) {
- if (!check->index()->representation().IsInteger32()) return NULL;
+ if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
HValue* index_base = NULL;
HConstant* constant = NULL;
@@ -4210,7 +4211,7 @@
// returns false, otherwise it returns true.
bool CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
- ASSERT(new_check->index()->representation().IsInteger32());
+ ASSERT(new_check->index()->representation().IsSmiOrInteger32());
bool keep_new_check = false;
if (new_offset > upper_offset_) {
@@ -4319,8 +4320,8 @@
HValue* index_context = IndexContext(*add, check);
if (index_context == NULL) return false;
- HConstant* new_constant = new(BasicBlock()->zone())
- HConstant(new_offset, Representation::Integer32());
+ HConstant* new_constant = new(BasicBlock()->zone()) HConstant(
+ new_offset, representation);
if (*add == NULL) {
new_constant->InsertBefore(check);
(*add) = HAdd::New(
@@ -4452,7 +4453,7 @@
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
HValue* index = array_operation->GetKey()->ActualValue();
- if (!index->representation().IsInteger32()) return;
+ if (!index->representation().IsSmiOrInteger32()) return;
HConstant* constant;
HValue* subexpression;
@@ -4564,7 +4565,6 @@
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
- AllowDeferredHandleDereference debug_output;
if (ref != NULL) {
ref->PrintTo(&stream);
} else {
@@ -5117,7 +5117,7 @@
bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
- return statement->OsrEntryId() == info()->osr_ast_id();
+ return statement->OsrEntryId() == current_info()->osr_ast_id();
}
@@ -5447,6 +5447,14 @@
}
+void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("ForOfStatement");
+}
+
+
void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -5496,9 +5504,9 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+ SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+ shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
@@ -5559,10 +5567,10 @@
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
Variable* var, LookupResult* lookup, bool is_store) {
- if (var->is_this() || !info()->has_global_object()) {
+ if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
(is_store && lookup->IsReadOnly()) ||
@@ -5577,7 +5585,7 @@
HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->LookupContext();
- int length = info()->scope()->ContextChainLength(var->scope());
+ int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
HInstruction* context_instruction = new(zone()) HOuterContext(context);
AddInstruction(context_instruction);
@@ -5613,12 +5621,12 @@
LookupGlobalProperty(variable, &lookup, false);
if (type == kUseCell &&
- info()->global_object()->IsAccessCheckNeeded()) {
+ current_info()->global_object()->IsAccessCheckNeeded()) {
type = kUseGeneric;
}
if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HLoadGlobalCell* instr =
new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
@@ -6209,7 +6217,8 @@
AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(map->prototype())),
Handle<JSObject>(JSObject::cast(proto)),
- zone()));
+ zone(),
+ top_info()));
}
HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
@@ -6548,7 +6557,7 @@
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HInstruction* instr =
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
@@ -6613,13 +6622,13 @@
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will be allocated to context slots. We have no
// direct way to detect that the variable is a parameter so we do
// a linear search of the parameter variables.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
+ if (var == current_info()->scope()->parameter(i)) {
Bailout(
"assignment to parameter, function uses arguments object");
}
@@ -6839,12 +6848,12 @@
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct way
// to detect that the variable is a parameter.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
+ if (var == current_info()->scope()->parameter(i)) {
return Bailout("assignment to parameter in arguments object");
}
}
@@ -7006,8 +7015,8 @@
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ prototype, holder, zone(), top_info()));
HValue* holder_value = AddInstruction(new(zone())
HConstant(holder, Representation::Tagged()));
return BuildLoadNamedField(holder_value,
@@ -7021,7 +7030,8 @@
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ prototype, holder, zone(), top_info()));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
return new(zone()) HConstant(function, Representation::Tagged());
}
@@ -7058,8 +7068,8 @@
isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, object_prototype, zone()));
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ prototype, object_prototype, zone(), top_info()));
load_mode = ALLOW_RETURN_HOLE;
graph()->MarkDependsOnEmptyArrayProtoElements();
}
@@ -7574,8 +7584,8 @@
Handle<Map> receiver_map) {
if (!holder.is_null()) {
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- AddInstruction(
- new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ prototype, holder, zone(), top_info()));
}
}
@@ -7720,7 +7730,7 @@
expr->ComputeTarget(map, name);
AddCheckPrototypeMaps(expr->holder(), map);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
@@ -7804,7 +7814,7 @@
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
// Do a quick check on source code length to avoid parsing large
@@ -7840,7 +7850,7 @@
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
- Handle<JSFunction> caller = info()->closure();
+ Handle<JSFunction> caller = current_info()->closure();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
@@ -7849,7 +7859,7 @@
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
- CompilationInfo* outer_info = info();
+ CompilationInfo* outer_info = current_info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
@@ -8284,7 +8294,8 @@
Call::GetPrototypeForPrimitiveCheck(STRING_CHECK,
expr->holder()->GetIsolate()),
expr->holder(),
- zone()));
+ zone(),
+ top_info()));
HInstruction* char_code =
BuildStringCharCodeAt(context, string, index);
if (id == kStringCharCodeAt) {
@@ -8435,7 +8446,7 @@
return false;
}
- if (info()->scope()->arguments() == NULL) return false;
+ if (current_info()->scope()->arguments() == NULL) return false;
ZoneList<Expression*>* args = expr->arguments();
if (args->length() != 2) return false;
@@ -8676,8 +8687,8 @@
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
- !info()->global_object()->IsAccessCheckNeeded()) {
- Handle<GlobalObject> global(info()->global_object());
+ !current_info()->global_object()->IsAccessCheckNeeded()) {
+ Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
if (known_global_function) {
@@ -8712,7 +8723,7 @@
}
if (TryInlineCall(expr)) return;
- if (expr->target().is_identical_to(info()->closure())) {
+ if (expr->target().is_identical_to(current_info()->closure())) {
graph()->MarkRecursive();
}
@@ -8915,18 +8926,7 @@
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
if (use_call_new_array) {
- // TODO(mvstanton): It would be better to use the already created global
- // property cell that is shared by full code gen. That way, any transition
- // information that happened after crankshaft won't be lost. The right
- // way to do that is to begin passing the cell to the type feedback oracle
- // instead of just the value in the cell. Do this in a follow-up checkin.
- Handle<Smi> feedback = expr->allocation_elements_kind();
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(feedback);
-
- // TODO(mvstanton): Here we should probably insert code to check if the
- // type cell elements kind is different from when we compiled, and deopt
- // in that case. Do this in a follow-up checin.
+ Handle<JSGlobalPropertyCell> cell = expr->allocation_info_cell();
call = new(zone()) HCallNewArray(context, constructor, argument_count,
cell);
} else {
@@ -9222,13 +9222,13 @@
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (info()->scope()->arguments() != NULL) {
+ if (current_info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct
// way to detect that the variable is a parameter so we use a
// linear search of the parameter list.
- int count = info()->scope()->num_parameters();
+ int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == info()->scope()->parameter(i)) {
+ if (var == current_info()->scope()->parameter(i)) {
return Bailout("assignment to parameter in arguments object");
}
}
@@ -9827,10 +9827,10 @@
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
if (global_function &&
- info()->has_global_object() &&
- !info()->global_object()->IsAccessCheckNeeded()) {
+ current_info()->has_global_object() &&
+ !current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
- Handle<GlobalObject> global(info()->global_object());
+ Handle<GlobalObject> global(current_info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
@@ -10270,9 +10270,9 @@
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(info()->language_mode());
+ int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
HInstruction* result = new(zone()) HDeclareGlobals(
environment()->LookupContext(), array, flags);
AddInstruction(result);
@@ -10326,8 +10326,8 @@
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_.Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
+ Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
+ declaration->fun(), current_info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
@@ -11248,14 +11248,16 @@
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
ASSERT(!FLAG_parallel_recompilation);
- AllowDeferredHandleDereference debug_output;
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
}
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
ASSERT(!FLAG_parallel_recompilation);
- AllowDeferredHandleDereference debug_output;
+ AllowHandleDereference allow_deref;
+ AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 5cb99a1..00cd9be 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -968,6 +968,7 @@
Zone* zone() const { return info_->zone(); }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
+ CompilationInfo* top_info() { return info_; }
HGraph* CreateGraph();
@@ -1489,7 +1490,7 @@
void set_ast_context(AstContext* context) { ast_context_ = context; }
// Accessors forwarded to the function state.
- CompilationInfo* info() const {
+ CompilationInfo* current_info() const {
return function_state()->compilation_info();
}
AstContext* call_context() const {
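The rename from info() to current_info() makes the inlining behavior explicit: it forwards to function_state()->compilation_info(), the function currently being visited, which changes during inlining. The new top_info() always returns the outermost CompilationInfo, and that is the one HCheckPrototypeMaps must register map dependencies on, since only the top-level info participates in the commit/rollback protocol from compiler.cc. In short:

    // Inside the graph builder, while inlining f() into g():
    CompilationInfo* inner = current_info();  // info for f, the inlinee
    CompilationInfo* outer = top_info();      // info for g, the compilation
    // Map dependencies go on outer: that is what CommitDependentMaps and
    // RollbackDependentMaps operate on when compilation finishes or aborts.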
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 58ddbad..82ef657 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1033,9 +1033,8 @@
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
@@ -1198,6 +1197,64 @@
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(eax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
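The VisitForOfStatement above emits a loop around the iteration protocol that the parser desugars for-of into (see the parser.cc changes further down): call next(), test the result's done flag, and feed its value to the loop body. A self-contained C++ model of that protocol, under illustrative names:

    #include <cstdio>

    // next() yields a value plus a done flag, mirroring result.value /
    // result.done in the generated code.
    struct IterResult { int value; bool done; };

    struct RangeIterator {
      int current, limit;
      IterResult next() {
        if (current >= limit) return {0, true};
        return {current++, false};
      }
    };

    int main() {
      RangeIterator iterator = {0, 3};
      // continue_label -> result = iterator.next() -> if (result.done) break
      // -> each = result.value -> body.
      for (IterResult result = iterator.next(); !result.done;
           result = iterator.next()) {
        std::printf("%d\n", result.value);
      }
      return 0;
    }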
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 98f1e8b..5d1f46d 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -109,10 +109,8 @@
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
+ info()->CommitDependentMaps(code);
+
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
@@ -5988,11 +5986,7 @@
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), instr);
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 647dd0e..70ecf5c 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -58,7 +58,6 @@
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -409,7 +408,6 @@
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index c5c7755..1b92c60 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2478,10 +2478,11 @@
LStoreNamedField* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
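The LStoreNamedField change above narrows when a deoptimization environment is attached: for a heap-object field it is only needed if the stored value might still turn out to be a smi at run time. A condensed standalone sketch of the new predicate (illustrative types, not V8's):

    #include <cassert>

    struct HValueType { bool statically_heap_object; };

    bool NeedsEnvironment(bool track_heap_object_fields,
                          bool field_is_heap_object,
                          HValueType value_type) {
      // A representation check (and hence a bailout environment) is only
      // required when the field expects a heap object but the stored value
      // is not statically known to be one.
      return track_heap_object_fields && field_is_heap_object &&
             !value_type.statically_heap_object;
    }

    int main() {
      assert(NeedsEnvironment(true, true, {false}));
      assert(!NeedsEnvironment(true, true, {true}));   // check elided
      assert(!NeedsEnvironment(true, false, {false}));
      return 0;
    }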
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 397ebde..9a166d7 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -104,7 +104,7 @@
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
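Replacing Isolate::Current() with zone->isolate() here (and in the MIPS port below) removes a hidden thread-local lookup: the isolate is threaded through an object the assembler already receives. A small sketch of the pattern, with hypothetical types:

    #include <cassert>

    struct Isolate { int id; };

    struct Zone {
      explicit Zone(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };

    struct MacroAssembler {
      explicit MacroAssembler(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate_;
    };

    // Before: the constructor reached for global state via a static
    // accessor; after: the isolate comes from the zone passed in explicitly.
    MacroAssembler MakeRegExpAssembler(Zone* zone) {
      return MacroAssembler(zone->isolate());
    }

    int main() {
      Isolate isolate = {1};
      Zone zone(&isolate);
      MacroAssembler masm = MakeRegExpAssembler(&zone);
      assert(masm.isolate_ == &isolate);
      return 0;
    }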
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 9defc5e..1fd921f 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1348,6 +1348,7 @@
PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
} else {
ASSERT(chunk_->info()->IsOptimizing());
+ AllowHandleDereference allow_deref;
PrintF("Function: %s\n",
*chunk_->info()->function()->debug_name()->ToCString());
}
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a07b468..99c5b48 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2481,11 +2481,12 @@
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
for (int i = 0; i < number_of_entries; i++) {
+ if (!entries->is_code_at(i)) continue;
Code* code = entries->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
}
- entries->clear_code_at(i);
+ entries->clear_at(i);
}
map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
@@ -2502,14 +2503,15 @@
for (int g = 0; g < DependentCode::kGroupCount; g++) {
int group_number_of_entries = 0;
for (int i = starts.at(g); i < starts.at(g + 1); i++) {
+ if (!entries->is_code_at(i)) continue;
Code* code = entries->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
if (new_number_of_entries + group_number_of_entries != i) {
- entries->set_code_at(new_number_of_entries +
- group_number_of_entries, code);
+ entries->set_object_at(
+ new_number_of_entries + group_number_of_entries, code);
}
- Object** slot = entries->code_slot_at(new_number_of_entries +
- group_number_of_entries);
+ Object** slot = entries->slot_at(new_number_of_entries +
+ group_number_of_entries);
RecordSlot(slot, slot, code);
group_number_of_entries++;
}
@@ -2520,7 +2522,7 @@
new_number_of_entries += group_number_of_entries;
}
for (int i = new_number_of_entries; i < number_of_entries; i++) {
- entries->clear_code_at(i);
+ entries->clear_at(i);
}
}
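With this patch a dependent-code array may hold unfinished compilations (Foreign-wrapped CompilationInfo pointers) alongside Code objects, so the collector has to test is_code_at() before treating an entry as code. A toy model of the new guard (standalone C++, liveness checks omitted, not V8's heap types):

    #include <cassert>
    #include <vector>

    // An entry is either finished code or a still-running compilation.
    struct Entry { bool is_code; int code_id; };

    void MarkDependentCodeForDeopt(std::vector<Entry>& entries,
                                   std::vector<int>& marked) {
      for (Entry& e : entries) {
        if (!e.is_code) continue;      // the new is_code_at() guard
        marked.push_back(e.code_id);   // set_marked_for_deoptimization
        e = Entry{false, 0};           // clear_at() works on any slot kind
      }
    }

    int main() {
      std::vector<Entry> entries = {{true, 7}, {false, 0}, {true, 9}};
      std::vector<int> marked;
      MarkDependentCodeForDeopt(entries, marked);
      assert(marked.size() == 2 && marked[0] == 7 && marked[1] == 9);
      return 0;
    }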
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index e1b01b3..fa97ee4 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -87,10 +87,8 @@
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
+ info()->CommitDependentMaps(code);
+
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
@@ -5127,11 +5125,7 @@
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index a208c40..d96755c 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -55,7 +55,6 @@
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -412,7 +411,6 @@
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 2247e7c..be3279f 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -2229,10 +2229,11 @@
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 977f050..2961519 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -122,7 +122,7 @@
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/src/object-observe.js b/src/object-observe.js
index 5ca70bc..ada7919 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -294,7 +294,7 @@
EndPerformChange(objectInfo, 'splice');
}
-function EnqueueSpliceRecord(array, index, removed, deleteCount, addedCount) {
+function EnqueueSpliceRecord(array, index, removed, addedCount) {
var objectInfo = objectInfoMap.get(array);
if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
return;
@@ -307,7 +307,6 @@
addedCount: addedCount
};
- changeRecord.removed.length = deleteCount;
ObjectFreeze(changeRecord);
ObjectFreeze(changeRecord.removed);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
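The object-observe change drops the separate deleteCount argument: the removed array's own length is now authoritative, and the C++ path sets that length explicitly (see the objects.cc hunk further down). The same invariant, modeled as a standalone C++ struct (illustrative; the real record is a frozen JS object):

    #include <cassert>
    #include <vector>

    struct SpliceRecord {
      int index;
      std::vector<int> removed;   // carries its own length
      int added_count;
      // deleteCount is derived, never stored separately.
      int delete_count() const { return static_cast<int>(removed.size()); }
    };

    int main() {
      SpliceRecord record{2, {10, 11, 12}, 1};
      assert(record.delete_count() == 3);
      return 0;
    }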
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 891f0d2..4008181 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -312,8 +312,9 @@
Representation r = descriptors->GetDetails(i).representation();
int field = descriptors->GetFieldIndex(i);
Object* value = RawFastPropertyAt(field);
- if (r.IsSmi()) ASSERT(value->IsSmi());
if (r.IsDouble()) ASSERT(value->IsHeapNumber());
+ if (value->IsUninitialized()) continue;
+ if (r.IsSmi()) ASSERT(value->IsSmi());
if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
}
}
@@ -777,6 +778,12 @@
}
+void Box::BoxVerify() {
+ CHECK(IsBox());
+ value()->Verify();
+}
+
+
void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(name());
VerifyPointer(flag());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 956d088..581935f 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -292,6 +292,9 @@
PretenureFlag tenure) {
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
+ if (IsUninitialized()) {
+ return heap->AllocateHeapNumber(0, tenure);
+ }
return heap->AllocateHeapNumber(Number(), tenure);
}
@@ -530,6 +533,11 @@
}
+bool MaybeObject::IsUninitialized() {
+ return !IsFailure() && ToObjectUnchecked()->IsUninitialized();
+}
+
+
Failure* Failure::cast(MaybeObject* obj) {
ASSERT(HAS_FAILURE_TAG(obj));
return reinterpret_cast<Failure*>(obj);
@@ -845,6 +853,11 @@
}
+bool Object::IsUninitialized() {
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
+}
+
+
bool Object::IsTrue() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
@@ -1541,7 +1554,7 @@
// Converting any field to the most specific type will cause the
// GeneralizeFieldRepresentation algorithm to create the most general existing
// transition that matches the object. This achieves what is needed.
- return GeneralizeFieldRepresentation(0, Representation::Smi());
+ return GeneralizeFieldRepresentation(0, Representation::None());
}
@@ -2366,7 +2379,6 @@
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
@@ -2382,7 +2394,6 @@
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
@@ -3617,6 +3628,9 @@
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (FLAG_track_fields && details.representation().IsNone()) {
+ return true;
+ }
if (FLAG_track_fields && details.representation().IsSmi()) {
return true;
}
@@ -3645,17 +3659,6 @@
}
-void Map::AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code) {
- Handle<DependentCode> codes =
- DependentCode::Insert(Handle<DependentCode>(dependent_code()),
- group, code);
- if (*codes != dependent_code()) {
- set_dependent_code(*codes);
- }
-}
-
-
int DependentCode::number_of_entries(DependencyGroup group) {
if (length() == 0) return 0;
return Smi::cast(get(group))->value();
@@ -3667,32 +3670,52 @@
}
+bool DependentCode::is_code_at(int i) {
+ return get(kCodesStartIndex + i)->IsCode();
+}
+
Code* DependentCode::code_at(int i) {
return Code::cast(get(kCodesStartIndex + i));
}
-void DependentCode::set_code_at(int i, Code* value) {
- set(kCodesStartIndex + i, value);
+CompilationInfo* DependentCode::compilation_info_at(int i) {
+ return reinterpret_cast<CompilationInfo*>(
+ Foreign::cast(get(kCodesStartIndex + i))->foreign_address());
}
-Object** DependentCode::code_slot_at(int i) {
+void DependentCode::set_object_at(int i, Object* object) {
+ set(kCodesStartIndex + i, object);
+}
+
+
+Object* DependentCode::object_at(int i) {
+ return get(kCodesStartIndex + i);
+}
+
+
+Object** DependentCode::slot_at(int i) {
return HeapObject::RawField(
this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
}
-void DependentCode::clear_code_at(int i) {
+void DependentCode::clear_at(int i) {
set_undefined(kCodesStartIndex + i);
}
+void DependentCode::copy(int from, int to) {
+ set(kCodesStartIndex + to, get(kCodesStartIndex + from));
+}
+
+
void DependentCode::ExtendGroup(DependencyGroup group) {
GroupStartIndexes starts(this);
for (int g = kGroupCount - 1; g > group; g--) {
if (starts.at(g) < starts.at(g + 1)) {
- set_code_at(starts.at(g + 1), code_at(starts.at(g)));
+ copy(starts.at(g), starts.at(g + 1));
}
}
}
@@ -4369,6 +4392,8 @@
ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+ACCESSORS(Box, value, Object, kValueOffset)
+
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -5314,10 +5339,15 @@
}
+ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
+ACCESSORS(JSArrayBuffer, weak_first_array, Object, kWeakFirstArrayOffset)
+
+
ACCESSORS(JSTypedArray, buffer, Object, kBufferOffset)
ACCESSORS(JSTypedArray, byte_offset, Object, kByteOffsetOffset)
ACCESSORS(JSTypedArray, byte_length, Object, kByteLengthOffset)
ACCESSORS(JSTypedArray, length, Object, kLengthOffset)
+ACCESSORS(JSTypedArray, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
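DependentCode stores all groups back to back in one fixed array, with per-group counts up front; the new copy() helper lets ExtendGroup make room for an insertion by bumping only each later group's first element to that group's end. A compact standalone model of that layout (plain C++, illustrative names):

    #include <cassert>
    #include <vector>

    const int kGroupCount = 3;

    struct GroupedArray {
      int counts[kGroupCount] = {0, 0, 0};
      std::vector<int> entries;   // groups stored back to back

      int start_of(int group) const {
        int start = 0;
        for (int g = 0; g < group; g++) start += counts[g];
        return start;
      }

      // ExtendGroup + set_object_at: each group after |group| moves its
      // first entry to its end (one copy), freeing a slot for the insert.
      void Insert(int group, int value) {
        entries.push_back(0);
        for (int g = kGroupCount - 1; g > group; g--) {
          if (counts[g] > 0) {
            entries[start_of(g) + counts[g]] = entries[start_of(g)];
          }
        }
        entries[start_of(group) + counts[group]] = value;
        counts[group]++;
      }
    };

    int main() {
      GroupedArray dependent;
      dependent.Insert(0, 1);   // group 0
      dependent.Insert(2, 3);   // group 2
      dependent.Insert(0, 2);   // forces group 2 over by one copy
      assert(dependent.entries[0] == 1);
      assert(dependent.entries[1] == 2);
      assert(dependent.entries[2] == 3);
      return 0;
    }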
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 8eab562..357d984 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -540,8 +540,6 @@
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
case JS_ARRAY_TYPE: return "JS_ARRAY";
- case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
- case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
case JS_PROXY_TYPE: return "JS_PROXY";
case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
case JS_REGEXP_TYPE: return "JS_REGEXP";
@@ -549,6 +547,8 @@
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
+ case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
+ case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
case FOREIGN_TYPE: return "FOREIGN";
case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
@@ -970,6 +970,13 @@
}
+void Box::BoxPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Box");
+ PrintF(out, "\n - value: ");
+ value()->ShortPrint(out);
+}
+
+
void AccessorPair::AccessorPairPrint(FILE* out) {
HeapObject::PrintHeader(out, "AccessorPair");
PrintF(out, "\n - getter: ");
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index f83f00f..829eab8 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -79,6 +79,10 @@
table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+ table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
@@ -99,6 +103,43 @@
template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ STATIC_ASSERT(
+ JSArrayBuffer::kWeakFirstArrayOffset ==
+ JSArrayBuffer::kWeakNextOffset + kPointerSize);
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+ return JSArrayBuffer::kSizeWithInternalFields;
+}
+
+
+template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
+ Map* map, HeapObject* object) {
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+ VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSTypedArray::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSTypedArray::kSize));
+ return JSTypedArray::kSize;
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticVisitor,
@@ -149,6 +190,10 @@
table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+ table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
// Registration for kVisitJSRegExp is done by StaticVisitor.
table_.Register(kVisitPropertyCell,
@@ -401,6 +446,41 @@
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ STATIC_ASSERT(
+ JSArrayBuffer::kWeakFirstArrayOffset ==
+ JSArrayBuffer::kWeakNextOffset + kPointerSize);
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
+ Map* map, HeapObject* object) {
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+ StaticVisitor::VisitPointers(
+ map->GetHeap(),
+ HeapObject::RawField(object,
+ JSTypedArray::kWeakNextOffset + kPointerSize),
+ HeapObject::RawField(object, JSTypedArray::kSize));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
Heap* heap, Map* map) {
// Make sure that the back pointer stored either in the map itself or
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 7b5c8be..4bf2804 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -134,6 +134,12 @@
case FILLER_TYPE:
return kVisitDataObjectGeneric;
+ case JS_ARRAY_BUFFER_TYPE:
+ return kVisitJSArrayBuffer;
+
+ case JS_TYPED_ARRAY_TYPE:
+ return kVisitJSTypedArray;
+
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -145,8 +151,6 @@
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index d4a2ed2..c4d1cc3 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -92,6 +92,8 @@
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakMap) \
+ V(JSArrayBuffer) \
+ V(JSTypedArray) \
V(JSRegExp)
// For data objects, JS objects and structs along with generic visitor which
@@ -333,6 +335,9 @@
return FreeSpace::cast(object)->Size();
}
+ INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
+ INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+
class DataObjectVisitor {
public:
template<int object_size>
@@ -407,6 +412,8 @@
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
+ INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
+ INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
// Mark pointers in a Map and its TransitionArray together, possibly
diff --git a/src/objects.cc b/src/objects.cc
index 24c60ec..4fecb0b 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1817,7 +1817,8 @@
MaybeObject* JSObject::AddFastProperty(Name* name,
Object* value,
PropertyAttributes attributes,
- StoreFromKeyed store_mode) {
+ StoreFromKeyed store_mode,
+ ValueType value_type) {
ASSERT(!IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
map()->instance_descriptors()->Search(
@@ -1843,8 +1844,8 @@
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- Representation representation = IsJSContextExtensionObject()
- ? Representation::Tagged() : value->OptimalRepresentation();
+ if (IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
+ Representation representation = value->OptimalRepresentation(value_type);
FieldDescriptor new_field(name, index, attributes, representation);
@@ -1961,7 +1962,8 @@
PropertyAttributes attributes,
StrictModeFlag strict_mode,
JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check) {
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -1988,7 +1990,8 @@
JSFunction::cast(value),
attributes);
} else {
- result = AddFastProperty(name, value, attributes, store_mode);
+ result = AddFastProperty(
+ name, value, attributes, store_mode, value_type);
}
} else {
// Normalize the object to prevent very large instance descriptors.
@@ -2272,7 +2275,7 @@
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if (new_desc->GetDetails(i).representation().IsDouble() &&
- old_desc->GetDetails(i).representation().IsSmi()) {
+ !old_desc->GetDetails(i).representation().IsDouble()) {
return true;
}
}
@@ -2343,8 +2346,9 @@
? old_descriptors->GetValue(i)
: RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
if (FLAG_track_double_fields &&
- old_details.representation().IsSmi() &&
+ !old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
+ if (old_details.representation().IsNone()) value = Smi::FromInt(0);
// Objects must be allocated in the old object space, since the
// overall number of HeapNumbers needed for the conversion might
// exceed the capacity of new space, and we would fail repeatedly
@@ -2397,7 +2401,7 @@
MaybeObject* maybe_new_map =
map()->GeneralizeRepresentation(modify_index, new_representation);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- ASSERT(map() != new_map || new_map->FindRootMap()->is_deprecated());
+ if (map() == new_map) return this;
return MigrateToMap(new_map);
}
@@ -2574,10 +2578,21 @@
Representation old_representation =
old_descriptors->GetDetails(modify_index).representation();
- if (old_representation.IsNone()) {
- UNREACHABLE();
+ // It's fine to transition from None to anything but double without any
+ // modification to the object, because the default uninitialized value for
+ // representation None can be overwritten by both smi and tagged values.
+ // Doubles, however, would require a box allocation.
+ if (old_representation.IsNone() &&
+ !new_representation.IsNone() &&
+ !new_representation.IsDouble()) {
+ if (FLAG_trace_generalization) {
+ PrintF("initializing representation %i: %p -> %s\n",
+ modify_index,
+ static_cast<void*>(this),
+ new_representation.Mnemonic());
+ }
old_descriptors->SetRepresentation(modify_index, new_representation);
- return this;
+ return old_map;
}
int descriptors = old_map->NumberOfOwnDescriptors();
@@ -2603,7 +2618,7 @@
updated_descriptors->GetDetails(modify_index).representation();
if (new_representation.fits_into(updated_representation)) {
if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsSmi())) {
+ !(modify_index == 0 && new_representation.IsNone())) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PrintF("migrating to existing map %p(%s) -> %p(%s)\n",
static_cast<void*>(this),
@@ -2641,7 +2656,7 @@
old_descriptors->GetKey(descriptor), new_descriptors);
if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsSmi())) {
+ !(modify_index == 0 && new_representation.IsNone())) {
PrintF("migrating to new map %i: %p(%s) -> %p(%s) (%i steps)\n",
modify_index,
static_cast<void*>(this),
@@ -3933,10 +3948,12 @@
Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ ValueType value_type) {
CALL_HEAP_FUNCTION(
object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+ object->SetLocalPropertyIgnoreAttributes(
+ *key, *value, attributes, value_type),
Object);
}
@@ -3944,7 +3961,8 @@
MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Name* name_raw,
Object* value_raw,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ ValueType value_type) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -3970,13 +3988,16 @@
return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
name_raw,
value_raw,
- attributes);
+ attributes,
+ value_type);
}
// Check for accessor in prototype chain removed here in clone.
if (!lookup.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name_raw, value_raw, attributes, kNonStrictMode);
+ return AddProperty(
+ name_raw, value_raw, attributes, kNonStrictMode,
+ MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, value_type);
}
// From this point on everything needs to be handlified.
@@ -4003,9 +4024,11 @@
}
case FIELD: {
Representation representation = lookup.representation();
- if (!value->FitsRepresentation(representation)) {
+ Representation value_representation =
+ value->OptimalRepresentation(value_type);
+ if (!value_representation.fits_into(representation)) {
MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
- lookup.GetDescriptorIndex(), value->OptimalRepresentation());
+ lookup.GetDescriptorIndex(), value_representation);
if (maybe_failure->IsFailure()) return maybe_failure;
DescriptorArray* desc = self->map()->instance_descriptors();
int descriptor = lookup.GetDescriptorIndex();
@@ -4046,9 +4069,11 @@
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
Representation representation = details.representation();
- if (!value->FitsRepresentation(representation)) {
+ Representation value_representation =
+ value->OptimalRepresentation(value_type);
+ if (!value_representation.fits_into(representation)) {
MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
- descriptor, value->OptimalRepresentation());
+ descriptor, value_representation);
if (!maybe_map->To(&transition_map)) return maybe_map;
Object* back = transition_map->GetBackPointer();
if (back->IsMap()) {
@@ -8515,6 +8540,8 @@
const uint8_t* buffer8_;
const uint16_t* buffer16_;
};
+
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
@@ -10850,24 +10877,18 @@
return true;
}
-
-// TODO(rafaelw): Remove |delete_count| argument and rely on the length of
-// of |deleted|.
static void EnqueueSpliceRecord(Handle<JSArray> object,
uint32_t index,
Handle<JSArray> deleted,
- uint32_t delete_count,
uint32_t add_count) {
Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> delete_count_object =
- isolate->factory()->NewNumberFromUint(delete_count);
Handle<Object> add_count_object =
isolate->factory()->NewNumberFromUint(add_count);
Handle<Object> args[] =
- { object, index_object, deleted, delete_count_object, add_count_object };
+ { object, index_object, deleted, add_count_object };
bool threw;
Execution::Call(Handle<JSFunction>(isolate->observers_enqueue_splice()),
@@ -10971,14 +10992,18 @@
uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
uint32_t delete_count = new_length < old_length ? old_length - new_length : 0;
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- if (delete_count) {
+ if (delete_count > 0) {
for (int i = indices.length() - 1; i >= 0; i--) {
JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE,
kNonStrictMode);
}
+
+ SetProperty(deleted, isolate->factory()->length_string(),
+ isolate->factory()->NewNumberFromUint(delete_count),
+ NONE, kNonStrictMode);
}
- EnqueueSpliceRecord(self, index, deleted, delete_count, add_count);
+ EnqueueSpliceRecord(self, index, deleted, add_count);
return *hresult;
}
@@ -11065,6 +11090,23 @@
}
+void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()), group, info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependent_maps(group)->Add(Handle<Map>(this), info->zone());
+}
+
+
+void Map::AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()), group, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
Recompute(entries);
}
@@ -11081,13 +11123,13 @@
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
- Handle<Code> value) {
+ Handle<Object> object) {
GroupStartIndexes starts(*entries);
int start = starts.at(group);
int end = starts.at(group + 1);
int number_of_entries = starts.number_of_entries();
- if (start < end && entries->code_at(end - 1) == *value) {
- // Do not append the code if it is already in the array.
+ if (start < end && entries->object_at(end - 1) == *object) {
+ // Do not append the compilation info if it is already in the array.
// It is sufficient to just check only the last element because
// we process embedded maps of an optimized code in one batch.
return entries;
@@ -11104,7 +11146,7 @@
end = starts.at(group + 1);
number_of_entries = starts.number_of_entries();
for (int i = 0; i < number_of_entries; i++) {
- entries->clear_code_at(i);
+ entries->clear_at(i);
}
// If the old fixed array was empty, we need to reset counters of the
// new array.
@@ -11116,17 +11158,78 @@
entries = new_entries;
}
entries->ExtendGroup(group);
- entries->set_code_at(end, *value);
+ entries->set_object_at(end, *object);
entries->set_number_of_entries(group, end + 1 - start);
return entries;
}
+void DependentCode::UpdateToFinishedCode(DependencyGroup group,
+ CompilationInfo* info,
+ Code* code) {
+ DisallowHeapAllocation no_gc;
+ AllowDeferredHandleDereference get_object_wrapper;
+ Foreign* info_wrapper = *info->object_wrapper();
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == info_wrapper) {
+ set_object_at(i, code);
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ for (int i = start; i < end; i++) {
+ ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+ }
+#endif
+}
+
+
+void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info) {
+ DisallowHeapAllocation no_allocation;
+ AllowDeferredHandleDereference get_object_wrapper;
+ Foreign* info_wrapper = *info->object_wrapper();
+ GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ // Find compilation info wrapper.
+ int info_pos = -1;
+ for (int i = start; i < end; i++) {
+ if (object_at(i) == info_wrapper) {
+ info_pos = i;
+ break;
+ }
+ }
+ if (info_pos == -1) return; // Not found.
+ int gap = info_pos;
+ // Use the last of each group to fill the gap in the previous group.
+ for (int i = group; i < kGroupCount; i++) {
+ int last_of_group = starts.at(i + 1) - 1;
+ ASSERT(last_of_group >= gap);
+ if (last_of_group == gap) continue;
+ copy(last_of_group, gap);
+ gap = last_of_group;
+ }
+ clear_at(gap); // Clear last gap.
+ set_number_of_entries(group, end - start - 1);
+
+#ifdef DEBUG
+ for (int i = start; i < end - 1; i++) {
+ ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+ }
+#endif
+}
+
+
bool DependentCode::Contains(DependencyGroup group, Code* code) {
GroupStartIndexes starts(this);
- int number_of_entries = starts.at(kGroupCount);
+ int number_of_entries = starts.number_of_code_entries();
for (int i = 0; i < number_of_entries; i++) {
- if (code_at(i) == code) return true;
+ if (object_at(i) == code) return true;
}
return false;
}
@@ -11147,20 +11250,25 @@
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
- int number_of_entries = starts.at(DependentCode::kGroupCount);
+ int code_entries = starts.number_of_code_entries();
if (start == end) return;
for (int i = start; i < end; i++) {
- Code* code = code_at(i);
- code->set_marked_for_deoptimization(true);
+ if (is_code_at(i)) {
+ Code* code = code_at(i);
+ code->set_marked_for_deoptimization(true);
+ } else {
+ CompilationInfo* info = compilation_info_at(i);
+ info->AbortDueToDependentMap();
+ }
}
// Compact the array by moving all subsequent groups to fill in the new holes.
- for (int src = end, dst = start; src < number_of_entries; src++, dst++) {
- set_code_at(dst, code_at(src));
+ for (int src = end, dst = start; src < code_entries; src++, dst++) {
+ copy(src, dst);
}
// Now the holes are at the end of the array, zap them for heap-verifier.
int removed = end - start;
- for (int i = number_of_entries - removed; i < number_of_entries; i++) {
- clear_code_at(i);
+ for (int i = code_entries - removed; i < code_entries; i++) {
+ clear_at(i);
}
set_number_of_entries(group, 0);
DeoptimizeDependentCodeFilter filter;
@@ -12044,7 +12152,7 @@
old_length_handle);
EndPerformSplice(Handle<JSArray>::cast(self));
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- EnqueueSpliceRecord(Handle<JSArray>::cast(self), old_length, deleted, 0,
+ EnqueueSpliceRecord(Handle<JSArray>::cast(self), old_length, deleted,
new_length - old_length);
} else {
EnqueueChangeRecord(self, "new", name, old_value);
@@ -15669,4 +15777,19 @@
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
+
+void JSArrayBuffer::Neuter() {
+ ASSERT(is_external());
+ set_backing_store(NULL);
+ set_byte_length(Smi::FromInt(0));
+}
+
+
+void JSTypedArray::Neuter() {
+ set_byte_offset(Smi::FromInt(0));
+ set_byte_length(Smi::FromInt(0));
+ set_length(Smi::FromInt(0));
+ set_elements(GetHeap()->EmptyExternalArrayForMap(map()));
+}
+
} } // namespace v8::internal
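These two Neuter() methods back the API-level v8::ArrayBuffer::Neuter() at the top of this patch: the buffer drops its backing store, and every typed array reached through the new weak_first_array/weak_next list is emptied. A standalone model of the walk (illustrative C++ types, GC weakness ignored):

    #include <cassert>
    #include <cstddef>

    struct TypedArrayModel {
      std::size_t byte_offset = 8;
      std::size_t byte_length = 16;
      std::size_t length = 4;
      TypedArrayModel* weak_next = nullptr;
      void Neuter() { byte_offset = byte_length = length = 0; }
    };

    struct ArrayBufferModel {
      void* backing_store = nullptr;
      std::size_t byte_length = 0;
      TypedArrayModel* weak_first_array = nullptr;

      // Mirrors the typed-array allocation path: new views are linked in
      // at the head of the buffer's list.
      void Register(TypedArrayModel* view) {
        view->weak_next = weak_first_array;
        weak_first_array = view;
      }

      void Neuter() {
        for (TypedArrayModel* a = weak_first_array; a != nullptr;
             a = a->weak_next) {
          a->Neuter();
        }
        backing_store = nullptr;
        byte_length = 0;
      }
    };

    int main() {
      char store[32];
      ArrayBufferModel buffer;
      buffer.backing_store = store;
      buffer.byte_length = sizeof(store);
      TypedArrayModel view1, view2;
      buffer.Register(&view1);
      buffer.Register(&view2);
      buffer.Neuter();
      assert(buffer.backing_store == nullptr);
      assert(view1.length == 0 && view2.length == 0);
      return 0;
    }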
diff --git a/src/objects.h b/src/objects.h
index b7b32f8..3ebe9c0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -125,6 +125,7 @@
// - Foreign
// - SharedFunctionInfo
// - Struct
+// - Box
// - DeclaredAccessorDescriptor
// - AccessorInfo
// - DeclaredAccessorInfo
@@ -348,6 +349,7 @@
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
+ V(BOX_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
@@ -526,6 +528,7 @@
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST_ALL(V) \
+ V(BOX, Box, box) \
V(DECLARED_ACCESSOR_DESCRIPTOR, \
DeclaredAccessorDescriptor, \
declared_accessor_descriptor) \
@@ -667,6 +670,7 @@
CODE_TYPE,
ODDBALL_TYPE,
JS_GLOBAL_PROPERTY_CELL_TYPE,
+ BOX_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
@@ -869,6 +873,7 @@
inline bool IsOutOfMemory();
inline bool IsException();
INLINE(bool IsTheHole());
+ INLINE(bool IsUninitialized());
inline bool ToObject(Object** obj) {
if (IsFailure()) return false;
*obj = reinterpret_cast<Object*>(this);
@@ -1046,6 +1051,7 @@
INLINE(bool IsUndefined());
INLINE(bool IsNull());
INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
+ INLINE(bool IsUninitialized());
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
@@ -1060,16 +1066,24 @@
bool ToInt32(int32_t* value);
bool ToUint32(uint32_t* value);
- inline Representation OptimalRepresentation() {
- if (FLAG_track_fields && IsSmi()) {
+ // Indicates whether OptimalRepresentation can do its work, or whether it
+ // always has to return Representation::Tagged().
+ enum ValueType {
+ OPTIMAL_REPRESENTATION,
+ FORCE_TAGGED
+ };
+
+ inline Representation OptimalRepresentation(
+ ValueType type = OPTIMAL_REPRESENTATION) {
+ if (!FLAG_track_fields) return Representation::Tagged();
+ if (type == FORCE_TAGGED) return Representation::Tagged();
+ if (IsSmi()) {
return Representation::Smi();
} else if (FLAG_track_double_fields && IsHeapNumber()) {
return Representation::Double();
- } else if (FLAG_track_heap_object_fields && !IsUndefined()) {
- // Don't track undefined as heapobject because it's also used as temporary
- // value for computed fields that may turn out to be Smi. That combination
- // will go tagged, so go tagged immediately.
- // TODO(verwaest): Change once we track computed boilerplate fields.
+ } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ return Representation::None();
+ } else if (FLAG_track_heap_object_fields) {
ASSERT(IsHeapObject());
return Representation::HeapObject();
} else {
@@ -1078,7 +1092,9 @@
}
inline bool FitsRepresentation(Representation representation) {
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (FLAG_track_fields && representation.IsNone()) {
+ return false;
+ } else if (FLAG_track_fields && representation.IsSmi()) {
return IsSmi();
} else if (FLAG_track_double_fields && representation.IsDouble()) {
return IsNumber();
@@ -1827,7 +1843,8 @@
Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
@@ -1854,7 +1871,8 @@
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
Name* key,
Object* value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
@@ -2216,7 +2234,8 @@
Name* name,
Object* value,
PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
// Add a property to a slow-case object.
MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
@@ -2230,7 +2249,8 @@
PropertyAttributes attributes,
StrictModeFlag strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ ValueType value_type = OPTIMAL_REPRESENTATION);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -4960,6 +4980,8 @@
};
+class CompilationInfo;
+
// This class describes the layout of dependent codes array of a map. The
// array is partitioned into several groups of dependent codes. Each group
// contains codes with the same dependency on the map. The array has the
@@ -5007,14 +5029,23 @@
void Recompute(DependentCode* entries);
int at(int i) { return start_indexes_[i]; }
int number_of_entries() { return start_indexes_[kGroupCount]; }
+ int number_of_code_entries() {
+ return start_indexes_[kGroupCount];
+ }
private:
int start_indexes_[kGroupCount + 1];
};
bool Contains(DependencyGroup group, Code* code);
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Code> value);
+ DependencyGroup group,
+ Handle<Object> object);
+ void UpdateToFinishedCode(DependencyGroup group,
+ CompilationInfo* info,
+ Code* code);
+ void RemoveCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info);
+
void DeoptimizeDependentCodeGroup(Isolate* isolate,
DependentCode::DependencyGroup group);
@@ -5022,10 +5053,14 @@
// and the mark compact collector.
inline int number_of_entries(DependencyGroup group);
inline void set_number_of_entries(DependencyGroup group, int value);
+ inline bool is_code_at(int i);
inline Code* code_at(int i);
- inline void set_code_at(int i, Code* value);
- inline Object** code_slot_at(int i);
- inline void clear_code_at(int i);
+ inline CompilationInfo* compilation_info_at(int i);
+ inline void set_object_at(int i, Object* object);
+ inline Object** slot_at(int i);
+ inline Object* object_at(int i);
+ inline void clear_at(int i);
+ inline void copy(int from, int to);
static inline DependentCode* cast(Object* object);
private:
@@ -5534,8 +5569,11 @@
inline bool CanOmitPrototypeChecks();
- inline void AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code);
+ void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
+ CompilationInfo* info);
+
+ void AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code);
bool IsMapInArrayPrototypeChain();
@@ -5669,6 +5707,26 @@
};
+// A simple one-element struct, useful where smis need to be boxed.
+class Box : public Struct {
+ public:
+ // [value]: the boxed contents.
+ DECL_ACCESSORS(value, Object)
+
+ static inline Box* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Box)
+ DECLARE_VERIFIER(Box)
+
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Box);
+};
+
+
// Script describes a script which has been added to the VM.
class Script: public Struct {
public:
@@ -8483,7 +8541,8 @@
static const byte kNull = 3;
static const byte kArgumentMarker = 4;
static const byte kUndefined = 5;
- static const byte kOther = 6;
+ static const byte kUninitialized = 6;
+ static const byte kOther = 7;
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
@@ -8745,9 +8804,18 @@
inline bool is_external();
inline void set_is_external(bool value);
+ // [weak_next]: linked list of array buffers.
+ DECL_ACCESSORS(weak_next, Object)
+
+ // [weak_first_array]: weak linked list of typed arrays.
+ DECL_ACCESSORS(weak_first_array, Object)
+
// Casting.
static inline JSArrayBuffer* cast(Object* obj);
+ // Neutering. Only neuters the buffer, not associated typed arrays.
+ void Neuter();
+
// Dispatched behavior.
DECLARE_PRINTER(JSArrayBuffer)
DECLARE_VERIFIER(JSArrayBuffer)
@@ -8755,7 +8823,12 @@
static const int kBackingStoreOffset = JSObject::kHeaderSize;
static const int kByteLengthOffset = kBackingStoreOffset + kPointerSize;
static const int kFlagOffset = kByteLengthOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kWeakNextOffset = kFlagOffset + kPointerSize;
+ static const int kWeakFirstArrayOffset = kWeakNextOffset + kPointerSize;
+ static const int kSize = kWeakFirstArrayOffset + kPointerSize;
+
+ static const int kSizeWithInternalFields =
+ kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
private:
// Bit position in a flag
@@ -8779,6 +8852,12 @@
// [length]: length of typed array in elements.
DECL_ACCESSORS(length, Object)
+ // [weak_next]: linked list of typed arrays over the same array buffer.
+ DECL_ACCESSORS(weak_next, Object)
+
+ // Neutering. Only neuters this typed array.
+ void Neuter();
+
// Casting.
static inline JSTypedArray* cast(Object* obj);
@@ -8793,7 +8872,8 @@
static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
static const int kLengthOffset = kByteLengthOffset + kPointerSize;
- static const int kSize = kLengthOffset + kPointerSize;
+ static const int kWeakNextOffset = kLengthOffset + kPointerSize;
+ static const int kSize = kWeakNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
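The new Representation::None support gives fields created from uninitialized values a bottom element: a later store of a real value usually only needs an in-place upgrade, except for doubles, which need a box allocation (as the objects.cc comment above explains). A toy version of the generalize step, simplified to the None < Smi < Double < Tagged chain rather than V8's full representation lattice:

    #include <cassert>

    // Illustrative labels only; V8's real lattice has more points.
    enum Representation { kNone = 0, kSmi, kDouble, kTagged };

    Representation Generalize(Representation a, Representation b) {
      return a > b ? a : b;
    }

    int main() {
      Representation field = kNone;        // created from an uninitialized value
      field = Generalize(field, kSmi);     // first smi store: cheap in-place
      assert(field == kSmi);               // upgrade, no map migration
      field = Generalize(field, kDouble);  // double store: a real
      assert(field == kDouble);            // generalization step
      return 0;
    }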
diff --git a/src/parser.cc b/src/parser.cc
index 4aaa9a1..fa24bf7 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -549,6 +549,7 @@
allow_natives_syntax_(false),
allow_lazy_(false),
allow_generators_(false),
+ allow_for_of_(false),
stack_overflow_(false),
parenthesized_function_(false),
zone_(info->zone()),
@@ -560,6 +561,7 @@
set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
set_allow_lazy(false); // Must be explicitly enabled.
set_allow_generators(FLAG_harmony_generators);
+ set_allow_for_of(FLAG_harmony_iteration);
}
@@ -1028,7 +1030,7 @@
}
default: {
- ExpectContextualKeyword("at", CHECK_OK);
+ ExpectContextualKeyword(CStrVector("at"), CHECK_OK);
Module* result = ParseModuleUrl(CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
@@ -1200,7 +1202,7 @@
names.Add(name, zone());
}
- ExpectContextualKeyword("from", CHECK_OK);
+ ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
Module* module = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
@@ -2622,6 +2624,90 @@
}
+bool Parser::CheckInOrOf(ForEachStatement::VisitMode* visit_mode) {
+ if (Check(Token::IN)) {
+ *visit_mode = ForEachStatement::ENUMERATE;
+ return true;
+ } else if (allow_for_of() && CheckContextualKeyword(CStrVector("of"))) {
+ *visit_mode = ForEachStatement::ITERATE;
+ return true;
+ }
+ return false;
+}
+
+
+void Parser::InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body) {
+ ForOfStatement* for_of = stmt->AsForOfStatement();
+
+ if (for_of != NULL) {
+ Factory* heap_factory = isolate()->factory();
+ Handle<String> iterator_str = heap_factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".iterator"));
+ Handle<String> result_str = heap_factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".result"));
+ Variable* iterator =
+ top_scope_->DeclarationScope()->NewTemporary(iterator_str);
+ Variable* result = top_scope_->DeclarationScope()->NewTemporary(result_str);
+
+ Expression* assign_iterator;
+ Expression* next_result;
+ Expression* result_done;
+ Expression* assign_each;
+
+ // var iterator = iterable;
+ {
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ assign_iterator = factory()->NewAssignment(
+ Token::ASSIGN, iterator_proxy, subject, RelocInfo::kNoPosition);
+ }
+
+ // var result = iterator.next();
+ {
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ Expression* next_literal =
+ factory()->NewLiteral(heap_factory->next_string());
+ Expression* next_property = factory()->NewProperty(
+ iterator_proxy, next_literal, RelocInfo::kNoPosition);
+ ZoneList<Expression*>* next_arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ Expression* next_call = factory()->NewCall(
+ next_property, next_arguments, RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ next_result = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, next_call, RelocInfo::kNoPosition);
+ }
+
+ // result.done
+ {
+ Expression* done_literal =
+ factory()->NewLiteral(heap_factory->done_string());
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ result_done = factory()->NewProperty(
+ result_proxy, done_literal, RelocInfo::kNoPosition);
+ }
+
+ // each = result.value
+ {
+ Expression* value_literal =
+ factory()->NewLiteral(heap_factory->value_string());
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ Expression* result_value = factory()->NewProperty(
+ result_proxy, value_literal, RelocInfo::kNoPosition);
+ assign_each = factory()->NewAssignment(
+ Token::ASSIGN, each, result_value, RelocInfo::kNoPosition);
+ }
+
+ for_of->Initialize(each, subject, body,
+ assign_iterator, next_result, result_done, assign_each);
+ } else {
+ stmt->Initialize(each, subject, body);
+ }
+}
+
+
Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
@@ -2642,21 +2728,21 @@
Handle<String> name;
Block* variable_statement =
ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
+ ForEachStatement::VisitMode mode;
- if (peek() == Token::IN && !name.is_null()) {
+ if (!name.is_null() && CheckInOrOf(&mode)) {
Interface* interface =
is_const ? Interface::NewConst() : Interface::NewValue();
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
Target target(&this->target_stack_, loop);
- Expect(Token::IN, CHECK_OK);
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
- loop->Initialize(each, enumerable, body);
+ InitializeForEachStatement(loop, each, enumerable, body);
Block* result = factory()->NewBlock(NULL, 2, false);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
@@ -2676,7 +2762,9 @@
ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
CHECK_OK);
bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
- if (peek() == Token::IN && accept_IN) {
+ ForEachStatement::VisitMode mode;
+
+ if (accept_IN && CheckInOrOf(&mode)) {
// Rewrite a for-in statement of the form
//
// for (let x in e) b
@@ -2698,11 +2786,10 @@
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- Expect(Token::IN, CHECK_OK);
top_scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
top_scope_ = for_scope;
@@ -2719,7 +2806,7 @@
body_block->AddStatement(variable_statement, zone());
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
- loop->Initialize(temp_proxy, enumerable, body_block);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2732,7 +2819,9 @@
}
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
- if (peek() == Token::IN) {
+ ForEachStatement::VisitMode mode;
+
+ if (CheckInOrOf(&mode)) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report
@@ -2742,15 +2831,14 @@
isolate()->factory()->invalid_lhs_in_for_in_string();
expression = NewThrowReferenceError(message);
}
- ForInStatement* loop = factory()->NewForInStatement(labels);
+ ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
Target target(&this->target_stack_, loop);
- Expect(Token::IN, CHECK_OK);
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(expression, enumerable, body);
+ InitializeForEachStatement(loop, expression, enumerable, body);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2804,10 +2892,10 @@
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
- if (loop) loop->Initialize(NULL, cond, next, body);
+ loop->Initialize(NULL, cond, next, body);
return result;
} else {
- if (loop) loop->Initialize(init, cond, next, body);
+ loop->Initialize(init, cond, next, body);
return loop;
}
}
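InitializeForEachStatement above rewrites for (each of subject) body into four sub-expressions that the backends then emit literally: iterator = subject, result = iterator.next(), result.done, and each = result.value. A sketch of how those four pieces drive one loop (standalone C++, with std::function standing in for the AST nodes):

    #include <cassert>
    #include <functional>

    struct Result { int value; bool done; };

    int main() {
      int iterator = 0;            // the ".iterator" temporary
      Result result = {0, false};  // the ".result" temporary
      int each = -1;
      int sum = 0;

      // The four desugared components:
      std::function<void()> assign_iterator = [&] { iterator = 0; };
      std::function<void()> next_result = [&] {
        result = iterator < 3 ? Result{iterator++, false} : Result{0, true};
      };
      std::function<bool()> result_done = [&] { return result.done; };
      std::function<void()> assign_each = [&] { each = result.value; };

      assign_iterator();
      while (true) {
        next_result();
        if (result_done()) break;
        assign_each();
        sum += each;               // loop body
      }
      assert(sum == 0 + 1 + 2);
      return 0;
    }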
@@ -3593,7 +3681,7 @@
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsTheHole()) {
is_holey = true;
- } else if (boilerplate_value->IsUndefined()) {
+ } else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
JSObject::SetOwnElement(
array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
@@ -3693,7 +3781,7 @@
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(expression);
}
- return isolate()->factory()->undefined_value();
+ return isolate()->factory()->uninitialized_value();
}
// Validation per 11.1.5 Object Initialiser
@@ -3804,13 +3892,17 @@
Handle<Object> key = property->key()->handle();
Handle<Object> value = GetBoilerplateValue(property->value());
- // Ensure objects with doubles are always treated as nested objects.
+ // Ensure objects that may, at any point in time, contain fields with double
+ // representation are always treated as nested objects. This is true for
+ // computed fields (value is undefined), and smi and double literals
+ // (value->IsNumber()).
// TODO(verwaest): Remove once we can store them inline.
- if (FLAG_track_double_fields && value->IsNumber()) {
+ if (FLAG_track_double_fields &&
+ (value->IsNumber() || value->IsUninitialized())) {
*may_store_doubles = true;
}
- is_simple_acc = is_simple_acc && !value->IsUndefined();
+ is_simple_acc = is_simple_acc && !value->IsUninitialized();
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
@@ -4507,6 +4599,7 @@
reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
reusable_preparser_->set_allow_lazy(true);
reusable_preparser_->set_allow_generators(allow_generators());
+ reusable_preparser_->set_allow_for_of(allow_for_of());
}
preparser::PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
@@ -4604,6 +4697,16 @@
}
+bool Parser::CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner().is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+}
+
+
void Parser::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
@@ -4621,12 +4724,10 @@
}
-void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
+void Parser::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
- Handle<String> symbol = GetSymbol();
- if (!*ok) return;
- if (!symbol->IsUtf8EqualTo(CStrVector(keyword))) {
+ if (!scanner().is_literal_contextual_keyword(keyword)) {
*ok = false;
ReportUnexpectedToken(scanner().current_token());
}
@@ -5764,6 +5865,7 @@
preparser::PreParser preparser(&scanner, &recorder, stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_generators(FLAG_harmony_generators);
+ preparser.set_allow_for_of(FLAG_harmony_iteration);
preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
scanner.Initialize(source);
preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
diff --git a/src/parser.h b/src/parser.h
index eea617f..b7e0700 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -437,6 +437,7 @@
bool allow_modules() { return scanner().HarmonyModules(); }
bool allow_harmony_scoping() { return scanner().HarmonyScoping(); }
bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
@@ -445,6 +446,7 @@
scanner().SetHarmonyScoping(allow);
}
void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
@@ -686,6 +688,12 @@
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression);
+ // Initialize the components of a for-in / for-of statement.
+ void InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body);
+
ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
bool name_is_reserved,
@@ -721,13 +729,16 @@
bool is_generator() const { return current_function_state_->is_generator(); }
+ bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode);
+
bool peek_any_identifier();
INLINE(void Consume(Token::Value token));
void Expect(Token::Value token, bool* ok);
bool Check(Token::Value token);
void ExpectSemicolon(bool* ok);
- void ExpectContextualKeyword(const char* keyword, bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
@@ -850,6 +861,7 @@
bool allow_natives_syntax_;
bool allow_lazy_;
bool allow_generators_;
+ bool allow_for_of_;
bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
diff --git a/src/preparser.cc b/src/preparser.cc
index 3bf88ca..243a3ed 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -659,6 +659,17 @@
}
+bool PreParser::CheckInOrOf() {
+ if (peek() == i::Token::IN ||
+ (allow_for_of() &&
+ scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) {
+ Next();
+ return true;
+ }
+ return false;
+}
+
+
PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
@@ -675,8 +686,7 @@
kForStatement, &decl_props, &decl_count, CHECK_OK);
bool accept_IN = decl_count == 1 &&
!(is_let && decl_props == kHasInitializers);
- if (peek() == i::Token::IN && accept_IN) {
- Expect(i::Token::IN, CHECK_OK);
+ if (accept_IN && CheckInOrOf()) {
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
@@ -685,8 +695,7 @@
}
} else {
ParseExpression(false, CHECK_OK);
- if (peek() == i::Token::IN) {
- Expect(i::Token::IN, CHECK_OK);
+ if (CheckInOrOf()) {
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
diff --git a/src/preparser.h b/src/preparser.h
index e3a036f..786316e 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -130,6 +130,7 @@
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
+ allow_for_of_(false),
parenthesized_function_(false) { }
~PreParser() {}
@@ -139,6 +140,7 @@
bool allow_modules() const { return scanner_->HarmonyModules(); }
bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); }
bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
@@ -147,6 +149,7 @@
scanner_->SetHarmonyScoping(allow);
}
void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
@@ -655,6 +658,8 @@
}
void ExpectSemicolon(bool* ok);
+ bool CheckInOrOf();
+
static int Precedence(i::Token::Value tok, bool accept_IN);
void SetStrictModeViolation(i::Scanner::Location,
@@ -678,6 +683,7 @@
bool allow_lazy_;
bool allow_natives_syntax_;
bool allow_generators_;
+ bool allow_for_of_;
bool parenthesized_function_;
};
} } // v8::preparser
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 3a1eca7..23cad95 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -255,6 +255,17 @@
}
+void PrettyPrinter::VisitForOfStatement(ForOfStatement* node) {
+ PrintLabels(node->labels());
+ Print("for (");
+ Visit(node->each());
+ Print(" of ");
+ Visit(node->iterable());
+ Print(") ");
+ Visit(node->body());
+}
+
+
void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
Print("try ");
Visit(node->try_block());
@@ -929,6 +940,14 @@
}
+void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
+ IndentedScope indent(this, "FOR OF");
+ PrintIndentedVisit("FOR", node->each());
+ PrintIndentedVisit("OF", node->iterable());
+ PrintIndentedVisit("BODY", node->body());
+}
+
+
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
diff --git a/src/property-details.h b/src/property-details.h
index 6d0f147..669b05d 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -113,7 +113,7 @@
bool is_more_general_than(const Representation& other) const {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
- if (IsHeapObject()) return other.IsDouble();
+ if (IsHeapObject()) return other.IsDouble() || other.IsNone();
return kind_ > other.kind_;
}
@@ -213,6 +213,7 @@
}
Representation representation() {
+ ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
diff --git a/src/property.h b/src/property.h
index 124775f..f853fc8 100644
--- a/src/property.h
+++ b/src/property.h
@@ -203,6 +203,8 @@
}
bool CanHoldValue(Handle<Object> value) {
+ if (IsNormal()) return true;
+ ASSERT(!IsTransition());
return value->FitsRepresentation(details_.representation());
}
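
CanHoldValue now answers true outright for normal (dictionary-mode) properties, which carry no tracked representation; fast properties still check that the value fits the field's representation. A sketch of the JS-visible idea (under --track-double-fields; this illustrates the concept, not this exact code path):

    function C(v) { this.x = v; }
    var a = new C(1);     // field 'x' is tracked as smi
    var b = new C(1.5);   // a double store generalizes 'x' to double
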
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index c69011f..3b9a2f6 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -44,8 +44,8 @@
buffer_(buffer),
pc_(0),
own_buffer_(false),
- advance_current_end_(kInvalidPC) {
-}
+ advance_current_end_(kInvalidPC),
+ isolate_(zone->isolate()) { }
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
@@ -445,7 +445,7 @@
Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = FACTORY->NewByteArray(length());
+ Handle<ByteArray> array = isolate_->factory()->NewByteArray(length());
Copy(array->GetDataStartAddress());
return array;
}
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 3569d8b..f8a412d 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -134,6 +134,8 @@
int advance_current_offset_;
int advance_current_end_;
+ Isolate* isolate_;
+
static const int kInvalidPC = -1;
DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 44fe050..df5c353 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -168,6 +168,11 @@
}
+void Processor::VisitForOfStatement(ForOfStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
// Rewrite both try and catch blocks (reversed order).
bool set_after_catch = is_set_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 0516c9c..62d044e 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -687,6 +687,10 @@
isolate->factory()->NewNumberFromSize(allocated_length);
CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
array_buffer->set_byte_length(*byte_length);
+
+ array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
+ isolate->heap()->set_array_buffers_list(*array_buffer);
+ array_buffer->set_weak_first_array(Smi::FromInt(0));
}
@@ -855,6 +859,8 @@
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
+ holder->set_weak_next(buffer->weak_first_array());
+ buffer->set_weak_first_array(*holder);
Handle<ExternalArray> elements =
isolate->factory()->NewExternalArray(
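
These hunks thread each new ArrayBuffer onto a heap-global list and each new typed array onto its buffer's weak_first_array list, newest first, so every view of a buffer can be found later. The snippet below is ordinary typed-array JavaScript; the comment describes the internal, non-observable state it produces:

    var ab  = new ArrayBuffer(16);
    var u8  = new Uint8Array(ab);
    var u32 = new Uint32Array(ab, 4, 2);
    // Internally: ab's weak_first_array list is u32 -> u8 -> Smi(0), and
    // the heap's array_buffers_list now links ab itself.
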
@@ -8089,13 +8095,15 @@
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WaitUntilOptimized) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompleteOptimization) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation) {
- if (V8::UseCrankshaft() && function->IsOptimizable()) {
- while (!function->IsOptimized()) OS::Sleep(50);
+ if (FLAG_parallel_recompilation && V8::UseCrankshaft()) {
+ // While the function is in the optimization pipeline, its code is marked as a builtin.
+ while (function->code()->kind() == Code::BUILTIN) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ OS::Sleep(50);
}
}
return isolate->heap()->undefined_value();
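
The renamed helper now drains the optimizing compiler thread's output queue while it waits, instead of sleeping until the function happens to be optimized. A hedged test-style sketch (run under d8 --allow-natives-syntax --parallel-recompilation):

    function f(x) { return x + 1; }
    f(1); f(2);                       // gather type feedback
    %OptimizeFunctionOnNextCall(f);
    f(3);                             // enqueues the parallel recompile
    %CompleteOptimization(f);         // returns once the code is installed
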
diff --git a/src/runtime.h b/src/runtime.h
index c28972c..8ef4c81 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -96,7 +96,7 @@
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
- F(WaitUntilOptimized, 1, 1) \
+ F(CompleteOptimization, 1, 1) \
F(GetOptimizationStatus, 1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
diff --git a/src/scanner.h b/src/scanner.h
index 92418f7..368ec1b 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -178,6 +178,11 @@
bool is_ascii() { return is_ascii_; }
+ bool is_contextual_keyword(Vector<const char> keyword) {
+ return is_ascii() && keyword.length() == position_ &&
+ (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
+ }
+
Vector<const uc16> utf16_literal() {
ASSERT(!is_ascii_);
ASSERT((position_ & 0x1) == 0);
@@ -325,6 +330,10 @@
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->is_ascii();
}
+ bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_contextual_keyword(keyword);
+ }
int literal_length() const {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->length();
@@ -361,6 +370,10 @@
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->is_ascii();
}
+ bool is_next_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_contextual_keyword(keyword);
+ }
int next_literal_length() const {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->length();
diff --git a/src/type-info.cc b/src/type-info.cc
index b284062..5113c55 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -78,9 +78,28 @@
Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
- return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry), isolate_)
- : Handle<Object>::cast(isolate_->factory()->undefined_value());
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Object* value = dictionary_->ValueAt(entry);
+ if (value->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(value);
+ return Handle<Object>(cell->value(), isolate_);
+ } else {
+ return Handle<Object>(value, isolate_);
+ }
+ }
+ return Handle<Object>::cast(isolate_->factory()->undefined_value());
+}
+
+
+Handle<JSGlobalPropertyCell> TypeFeedbackOracle::GetInfoCell(
+ TypeFeedbackId ast_id) {
+ int entry = dictionary_->FindEntry(IdToKey(ast_id));
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+ dictionary_->ValueAt(entry));
+ return Handle<JSGlobalPropertyCell>(cell, isolate_);
+ }
+ return Handle<JSGlobalPropertyCell>::null();
}
@@ -316,21 +335,12 @@
}
-ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- if (info->IsSmi()) {
- return static_cast<ElementsKind>(Smi::cast(*info)->value());
- } else {
- // TODO(mvstanton): avoided calling GetInitialFastElementsKind() for perf
- // reasons. Is there a better fix?
- if (FLAG_packed_arrays) {
- return FAST_SMI_ELEMENTS;
- } else {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
- }
+Handle<JSGlobalPropertyCell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(
+ CallNew* expr) {
+ return GetInfoCell(expr->CallNewFeedbackId());
}
+
Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
ObjectLiteral::Property* prop) {
ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
@@ -749,12 +759,13 @@
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
for (int i = 0; i < cache->CellCount(); i++) {
TypeFeedbackId ast_id = cache->AstId(i);
- Object* value = cache->Cell(i)->value();
+ JSGlobalPropertyCell* cell = cache->Cell(i);
+ Object* value = cell->value();
if (value->IsSmi() ||
(value->IsJSFunction() &&
!CanRetainOtherContext(JSFunction::cast(value),
*native_context_))) {
- SetInfo(ast_id, value);
+ SetInfo(ast_id, cell);
}
}
}
diff --git a/src/type-info.h b/src/type-info.h
index 15a0b81..53a83be 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -283,7 +283,7 @@
CheckType GetCallCheckType(Call* expr);
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
- ElementsKind GetCallNewElementsKind(CallNew* expr);
+ Handle<JSGlobalPropertyCell> GetCallNewAllocationInfoCell(CallNew* expr);
Handle<Map> GetObjectLiteralStoreMap(ObjectLiteralProperty* prop);
@@ -338,11 +338,11 @@
// Returns an element from the backing store. Returns undefined if
// there is no information.
- public:
- // TODO(mvstanton): how to get this information without making the method
- // public?
Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ // Returns the cell that contains the type feedback.
+ Handle<JSGlobalPropertyCell> GetInfoCell(TypeFeedbackId ast_id);
+
private:
Handle<Context> native_context_;
Isolate* isolate_;
diff --git a/src/types.cc b/src/types.cc
index 2a96055..f7fbd2d 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -42,8 +42,14 @@
}
return bitset;
} else {
- Map* map =
- this->is_class() ? *this->as_class() : this->as_constant()->map();
+ Map* map = NULL;
+ if (this->is_class()) {
+ map = *this->as_class();
+ } else {
+ v8::internal::Object* value = this->as_constant()->value();
+ if (value->IsSmi()) return kSmi;
+ map = HeapObject::cast(value)->map();
+ }
switch (map->instance_type()) {
case STRING_TYPE:
case ASCII_STRING_TYPE:
@@ -126,7 +132,8 @@
return this->is_class() && *this->as_class() == *that->as_class();
}
if (that->is_constant()) {
- return this->is_constant() && *this->as_constant() == *that->as_constant();
+ return this->is_constant() &&
+ this->as_constant()->value() == that->as_constant()->value();
}
// (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
@@ -169,7 +176,8 @@
return that->is_class() && *this->as_class() == *that->as_class();
}
if (this->is_constant()) {
- return that->is_constant() && *this->as_constant() == *that->as_constant();
+ return that->is_constant() &&
+ this->as_constant()->value() == that->as_constant()->value();
}
// (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
diff --git a/src/types.h b/src/types.h
index 018969f..6db9bfb 100644
--- a/src/types.h
+++ b/src/types.h
@@ -79,8 +79,8 @@
// existing assumptions or tests.
//
// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets via smis. Class and Constant are heap pointers to the respective
-// argument. Only unions containing Class'es or Constant's require allocation.
+// bitsets via smis. Class is a heap pointer to the respective map. Only
+// Constants, or unions containing Classes or Constants, require allocation.
//
// The type representation is heap-allocated, so cannot (currently) be used in
// a parallel compilation context.
@@ -113,8 +113,10 @@
static Type* Class(Handle<Map> map) { return from_handle(map); }
static Type* Constant(Handle<HeapObject> value) {
- ASSERT(!value->IsMap() && !value->IsFixedArray());
- return from_handle(value);
+ return Constant(value, value->GetIsolate());
+ }
+ static Type* Constant(Handle<v8::internal::Object> value, Isolate* isolate) {
+ return from_handle(isolate->factory()->NewBox(value));
}
static Type* Union(Handle<Type> type1, Handle<Type> type2);
@@ -159,15 +161,12 @@
bool is_bitset() { return this->IsSmi(); }
bool is_class() { return this->IsMap(); }
- bool is_constant() { return !(is_bitset() || is_class() || is_union()); }
+ bool is_constant() { return this->IsBox(); }
bool is_union() { return this->IsFixedArray(); }
int as_bitset() { return Smi::cast(this)->value(); }
Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
- Handle<HeapObject> as_constant() {
- ASSERT(is_constant());
- return Handle<HeapObject>::cast(handle());
- }
+ Handle<Box> as_constant() { return Handle<Box>::cast(handle()); }
Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
Handle<Type> handle() { return handle_via_isolate_of(this); }
diff --git a/src/typing.cc b/src/typing.cc
index 3e4144e..4ba6721 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -224,6 +224,13 @@
}
+void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ CHECK_ALIVE(Visit(stmt->iterable()));
+ CHECK_ALIVE(Visit(stmt->body()));
+}
+
+
void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
CHECK_ALIVE(Visit(stmt->try_block()));
diff --git a/src/v8natives.js b/src/v8natives.js
index d444613..e168b71 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -928,7 +928,6 @@
EnqueueSpliceRecord(obj,
new_length < old_length ? new_length : old_length,
removed,
- removed.length,
new_length > old_length ? new_length - old_length : 0);
}
if (threw) {
@@ -967,7 +966,7 @@
}
if (emit_splice) {
EndPerformSplice(obj);
- EnqueueSpliceRecord(obj, length, [], 0, index + 1 - length);
+ EnqueueSpliceRecord(obj, length, [], index + 1 - length);
}
return true;
}
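
EnqueueSpliceRecord no longer takes a separate delete count; it is derivable from the removed array. A hedged sketch of the record shape (assuming --harmony-observation delivers splice records to this observer):

    var arr = [1, 2, 3];
    Object.observe(arr, function(records) {
      var r = records[0];
      // expected: type 'splice', index 1, removed [2,3], addedCount 0
      print(r.type + ' ' + r.index + ' [' + r.removed + '] ' + r.addedCount);
    });
    arr.length = 1;   // truncation emits one splice record
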
diff --git a/src/version.cc b/src/version.cc
index bfb413f..d84f9ea 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 19
-#define BUILD_NUMBER 10
+#define BUILD_NUMBER 11
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index ed4896d..62c6073 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1046,9 +1046,8 @@
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
@@ -1224,6 +1223,64 @@
}
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(rax, &convert);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(rax);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
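
The stepwise comments above describe the protocol the generated code drives. The same loop in plain JavaScript, as a runnable sketch (the explicit 'counter' object stands in for iterable[@@iterator](), which has no JS-level name here):

    function counter(limit) {
      var i = 0;
      return {
        next: function() {
          return i < limit ? { value: i++, done: false }
                           : { value: void 0, done: true };
        }
      };
    }

    var iterator = counter(3);            // var iterator = iterable[@@iterator]()
    for (var result = iterator.next();    // result = iterator.next()
         !result.done;                    // if (result.done) break
         result = iterator.next()) {
      var each = result.value;            // each = result.value
      print(each);                        // prints 0, 1, 2
    }
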
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 4a469f7..2d6e4cb 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -92,10 +92,8 @@
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
+ info()->CommitDependentMaps(code);
+
for (int i = 0 ; i < transition_maps_.length(); i++) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
@@ -5078,11 +5076,7 @@
ASSERT(prototypes->length() == maps->length());
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- } else {
+ if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), instr);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 07a948c..462a7c5 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -57,7 +57,6 @@
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -362,7 +361,6 @@
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index e50be08..59a833e 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -2294,10 +2294,11 @@
needs_write_barrier_for_map) ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject())) {
- return AssignEnvironment(result);
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject()) {
+ if (!instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
}
return result;
}
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 4a4a84e..efb2a65 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -120,7 +120,7 @@
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
- masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ masm_(zone->isolate(), NULL, kRegExpCodeSize),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),