Version 3.12.15
Added PRESERVE_ASCII_NULL option to String::WriteAscii; a usage sketch follows this list. (issue 2252)
Added a dependency to HLoadKeyed* instructions to prevent invalid hoisting. (Chromium issue 137768)
Enabled building d8 for Android on Mac.
Interpret negative hexadecimal literals as NaN, as illustrated below. (issue 2240)
Expose counters in JavaScript when using --track-gc-object-stats.
Enabled building and testing V8 on Android IA.
Added --trace-parse flag to the parser.
Performance and stability improvements on all platforms.
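
Two of the entries above are visible to embedders through the public API. A minimal sketch of the new PRESERVE_ASCII_NULL option (hypothetical embedder code, assuming an entered HandleScope and Context; not part of this patch):

  v8::Handle<v8::String> s = v8::String::New("a\0b", 3);
  char buf[4];
  // Without PRESERVE_ASCII_NULL, WriteAscii replaces the embedded NUL
  // with a space; with it, the NUL is copied through unchanged.
  int n = s->WriteAscii(buf, 0, -1, v8::String::PRESERVE_ASCII_NULL);
  // n == 3; buf now holds {'a', '\0', 'b', '\0'}.

And a sketch of the string-to-number change for negative hexadecimal literals:

  v8::Handle<v8::Value> v = v8::String::New("-0x1")->ToNumber();
  // v->NumberValue() is now NaN; V8 previously accepted -0x1 as -1.
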
git-svn-id: http://v8.googlecode.com/svn/trunk@12180 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index cd35910..16bfb55 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -72,6 +72,7 @@
execution.cc
extensions/externalize-string-extension.cc
extensions/gc-extension.cc
+ extensions/statistics-extension.cc
factory.cc
fast-dtoa.cc
fixed-dtoa.cc
@@ -105,6 +106,7 @@
objects-visiting.cc
objects.cc
once.cc
+ optimizing-compiler-thread.cc
parser.cc
preparse-data.cc
preparser.cc
diff --git a/src/api.cc b/src/api.cc
index 8218ad4..cd0684b 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3847,6 +3847,9 @@
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (options & HINT_MANY_WRITES_EXPECTED) {
+ FlattenString(str); // Flatten the string for efficiency.
+ }
int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
@@ -3903,11 +3906,7 @@
// Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- FlattenString(str);
- }
+
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@@ -3949,8 +3948,9 @@
c,
unibrow::Utf16::kNoPreviousCharacter);
if (pos + written <= capacity) {
- for (int j = 0; j < written; j++)
+ for (int j = 0; j < written; j++) {
buffer[pos + j] = intermediate[j];
+ }
pos += written;
nchars++;
} else {
@@ -3963,8 +3963,9 @@
}
if (nchars_ref != NULL) *nchars_ref = nchars;
if (!(options & NO_NULL_TERMINATION) &&
- (i == len && (capacity == -1 || pos < capacity)))
+ (i == len && (capacity == -1 || pos < capacity))) {
buffer[pos++] = '\0';
+ }
return pos;
}
@@ -3977,28 +3978,45 @@
if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
LOG_API(isolate, "String::WriteAscii");
ENTER_V8(isolate);
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str); // Flatten the string for efficiency.
}
+
+ if (str->IsAsciiRepresentation()) {
+ // WriteToFlat is faster than using the StringInputBuffer.
+ if (length == -1) length = str->length() + 1;
+ int len = i::Min(length, str->length() - start);
+ i::String::WriteToFlat(*str, buffer, start, start + len);
+ if (!(options & PRESERVE_ASCII_NULL)) {
+ for (int i = 0; i < len; i++) {
+ if (buffer[i] == '\0') buffer[i] = ' ';
+ }
+ }
+ if (!(options & NO_NULL_TERMINATION) && length > len) {
+ buffer[len] = '\0';
+ }
+ return len;
+ }
+
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
int end = length;
- if ( (length == -1) || (length > str->length() - start) )
+ if ((length == -1) || (length > str->length() - start)) {
end = str->length() - start;
+ }
if (end < 0) return 0;
write_input_buffer.Reset(start, *str);
int i;
for (i = 0; i < end; i++) {
char c = static_cast<char>(write_input_buffer.GetNext());
- if (c == '\0') c = ' ';
+ if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
buffer[i] = c;
}
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length))
+ if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
buffer[i] = '\0';
+ }
return i;
}
@@ -4017,7 +4035,7 @@
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str);
}
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
@@ -4203,8 +4221,9 @@
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(
reinterpret_cast<i::Address>(value), i::TENURED);
- if (!foreign.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ if (!foreign.is_null()) {
+ Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ }
}
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@@ -6422,12 +6441,6 @@
Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
v->VisitPointers(start, start + saved_contexts_.length());
}
-
- for (DeferredHandles* deferred = deferred_handles_head_;
- deferred != NULL;
- deferred = deferred->next_) {
- deferred->Iterate(v);
- }
}
@@ -6448,8 +6461,8 @@
DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
- DeferredHandles* deferred = new DeferredHandles(
- deferred_handles_head_, isolate()->handle_scope_data()->next, this);
+ DeferredHandles* deferred =
+ new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
while (!blocks_.is_empty()) {
Object** block_start = blocks_.last();
@@ -6470,41 +6483,12 @@
ASSERT(prev_limit == NULL || !blocks_.is_empty());
ASSERT(!blocks_.is_empty() && prev_limit != NULL);
- deferred_handles_head_ = deferred;
ASSERT(last_handle_before_deferred_block_ != NULL);
last_handle_before_deferred_block_ = NULL;
return deferred;
}
-void HandleScopeImplementer::DestroyDeferredHandles(DeferredHandles* deferred) {
-#ifdef DEBUG
- DeferredHandles* deferred_iterator = deferred;
- while (deferred_iterator->previous_ != NULL) {
- deferred_iterator = deferred_iterator->previous_;
- }
- ASSERT(deferred_handles_head_ == deferred_iterator);
-#endif
- if (deferred_handles_head_ == deferred) {
- deferred_handles_head_ = deferred_handles_head_->next_;
- }
- if (deferred->next_ != NULL) {
- deferred->next_->previous_ = deferred->previous_;
- }
- if (deferred->previous_ != NULL) {
- deferred->previous_->next_ = deferred->next_;
- }
- for (int i = 0; i < deferred->blocks_.length(); i++) {
-#ifdef DEBUG
- HandleScope::ZapRange(deferred->blocks_[i],
- &deferred->blocks_[i][kHandleBlockSize]);
-#endif
- if (spare_ != NULL) DeleteArray(spare_);
- spare_ = deferred->blocks_[i];
- }
-}
-
-
void HandleScopeImplementer::BeginDeferredScope() {
ASSERT(last_handle_before_deferred_block_ == NULL);
last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
@@ -6512,7 +6496,14 @@
DeferredHandles::~DeferredHandles() {
- impl_->DestroyDeferredHandles(this);
+ isolate_->UnlinkDeferredHandles(this);
+
+ for (int i = 0; i < blocks_.length(); i++) {
+#ifdef DEBUG
+ HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
+#endif
+ isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
+ }
}
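
The api.cc hunk above moves ownership of the DeferredHandles list from HandleScopeImplementer to the Isolate. The Link/Unlink helpers themselves are not part of this section; presumably they maintain the doubly-linked list along these lines (illustrative sketch using the next_/previous_/deferred_handles_head_ names from this patch):

  void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
    deferred->next_ = deferred_handles_head_;
    if (deferred_handles_head_ != NULL) {
      deferred_handles_head_->previous_ = deferred;
    }
    deferred_handles_head_ = deferred;
  }

  void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
    if (deferred_handles_head_ == deferred) {
      deferred_handles_head_ = deferred->next_;
    }
    if (deferred->next_ != NULL) {
      deferred->next_->previous_ = deferred->previous_;
    }
    if (deferred->previous_ != NULL) {
      deferred->previous_->next_ = deferred->next_;
    }
  }

With this, ~DeferredHandles no longer needs a back-pointer to a HandleScopeImplementer, and the handle blocks are recycled through the new HandleScopeImplementer::ReturnBlock (see api.h below).
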
diff --git a/src/api.h b/src/api.h
index 6be4474..ea70dca 100644
--- a/src/api.h
+++ b/src/api.h
@@ -397,13 +397,12 @@
~DeferredHandles();
private:
- DeferredHandles(DeferredHandles* next, Object** first_block_limit,
- HandleScopeImplementer* impl)
- : next_(next),
+ DeferredHandles(Object** first_block_limit, Isolate* isolate)
+ : next_(NULL),
previous_(NULL),
first_block_limit_(first_block_limit),
- impl_(impl) {
- if (next != NULL) next->previous_ = this;
+ isolate_(isolate) {
+ isolate->LinkDeferredHandles(this);
}
void Iterate(ObjectVisitor* v);
@@ -412,9 +411,10 @@
DeferredHandles* next_;
DeferredHandles* previous_;
Object** first_block_limit_;
- HandleScopeImplementer* impl_;
+ Isolate* isolate_;
friend class HandleScopeImplementer;
+ friend class Isolate;
};
@@ -436,8 +436,7 @@
saved_contexts_(0),
spare_(NULL),
call_depth_(0),
- last_handle_before_deferred_block_(NULL),
- deferred_handles_head_(NULL) { }
+ last_handle_before_deferred_block_(NULL) { }
~HandleScopeImplementer() {
DeleteArray(spare_);
@@ -475,13 +474,18 @@
inline List<internal::Object**>* blocks() { return &blocks_; }
Isolate* isolate() const { return isolate_; }
+ void ReturnBlock(Object** block) {
+ ASSERT(block != NULL);
+ if (spare_ != NULL) DeleteArray(spare_);
+ spare_ = block;
+ }
+
private:
void ResetAfterArchive() {
blocks_.Initialize(0);
entered_contexts_.Initialize(0);
saved_contexts_.Initialize(0);
spare_ = NULL;
- deferred_handles_head_ = NULL;
last_handle_before_deferred_block_ = NULL;
call_depth_ = 0;
}
@@ -490,7 +494,6 @@
ASSERT(blocks_.length() == 0);
ASSERT(entered_contexts_.length() == 0);
ASSERT(saved_contexts_.length() == 0);
- ASSERT(deferred_handles_head_ == NULL);
blocks_.Free();
entered_contexts_.Free();
saved_contexts_.Free();
@@ -503,7 +506,6 @@
void BeginDeferredScope();
DeferredHandles* Detach(Object** prev_limit);
- void DestroyDeferredHandles(DeferredHandles* handles);
Isolate* isolate_;
List<internal::Object**> blocks_;
@@ -514,7 +516,6 @@
Object** spare_;
int call_depth_;
Object** last_handle_before_deferred_block_;
- DeferredHandles* deferred_handles_head_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index d875796..42343b1 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1834,6 +1834,7 @@
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1855,6 +1856,7 @@
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 578bd81..12a914b 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -697,6 +697,43 @@
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(pc, r2);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+
+ __ push(r1); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore receiver.
+ __ pop(r1);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e767001..5aadc3c 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -56,8 +56,9 @@
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index a24f310..f55956c 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1144,7 +1144,7 @@
// We got a map in register r0. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(r0, r1, r2);
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kLastAddedOffset));
+ __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumCacheOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 4f63b50..91afec5 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1835,7 +1835,8 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
@@ -1847,7 +1848,8 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@@ -1866,7 +1868,8 @@
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
@@ -1894,7 +1897,8 @@
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
@@ -1911,7 +1915,8 @@
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@@ -1932,7 +1937,8 @@
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index c7910a3..9982bf0 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -859,6 +859,7 @@
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
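
The codegen changes below handle keys that are still tagged as smis: a smi encoding k is stored as k << kSmiTagSize, so the element-address shift must be reduced by one bit. A standalone illustration of the arithmetic (a sketch, not patch code):

  static const int kSmiTagSize = 1;  // a smi encoding k is the word k << 1

  int EffectiveShift(int element_size_shift, bool key_is_tagged) {
    // A tagged key already carries a factor of two, so shift one bit less:
    // base + (smi_key << (shift - 1)) == base + (k << shift).
    return key_is_tagged ? element_size_shift - kSmiTagSize
                         : element_size_shift;
  }

For byte-sized external elements, element_size_shift is 0 and the effective shift becomes -1; PrepareKeyedOperand (added below) compensates with a logical shift right, MemOperand(base, key, LSR, 1).
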
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index cf0c35c..d94a1fe 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2784,7 +2784,12 @@
Register scratch = scratch0();
// Load the result.
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ ldr(result, FieldMemOperand(scratch, offset));
@@ -2811,8 +2816,9 @@
DwVfpRegister result = ToDoubleRegister(instr->result());
Register scratch = scratch0();
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -2824,14 +2830,15 @@
}
Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) << shift_size) +
+ ? Operand(((constant_key + instr->additional_index()) <<
+ element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ (instr->additional_index() << element_size_shift)));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2844,6 +2851,42 @@
}
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset) {
+ if (additional_index != 0 && !key_is_constant) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ add(scratch0(), key, Operand(additional_index));
+ }
+
+ if (key_is_constant) {
+ return MemOperand(base,
+ (constant_key << element_size) + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (shift_size >= 0) {
+ return MemOperand(base, key, LSL, shift_size);
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ return MemOperand(base, key, LSR, 1);
+ }
+ }
+
+ if (shift_size >= 0) {
+ return MemOperand(base, scratch0(), LSL, shift_size);
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ return MemOperand(base, scratch0(), LSR, 1);
+ }
+}
+
+
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
@@ -2859,15 +2902,17 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
- ? Operand(constant_key << shift_size)
+ ? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
@@ -2878,15 +2923,10 @@
}
} else {
Register result = ToRegister(instr->result());
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset)
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -3803,7 +3843,12 @@
+ FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
if (instr->additional_index() != 0) {
__ add(scratch,
scratch,
@@ -3848,9 +3893,11 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
- ? Operand((constant_key << shift_size) +
+ ? Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@@ -3868,7 +3915,7 @@
vs);
}
- __ vstr(value, scratch, instr->additional_index() << shift_size);
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
@@ -3888,15 +3935,18 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant ? Operand(constant_key << shift_size)
- : Operand(key, LSL, shift_size));
+ Operand operand(key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
@@ -3906,16 +3956,10 @@
}
} else {
Register value(ToRegister(instr->value()));
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size))
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -5364,7 +5408,7 @@
Register scratch = ToRegister(instr->scratch());
__ LoadInstanceDescriptors(map, result, scratch);
__ ldr(result,
- FieldMemOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand(0));
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index c871a57..d380f4b 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -132,6 +132,15 @@
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset);
+
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 84e866e..88e5dec 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3723,7 +3723,7 @@
// Check that there is an enum cache in the non-empty instance
// descriptors (r3). This is the case if the next enumeration
// index field does not contain a smi.
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kLastAddedOffset));
+ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(r3, call_runtime);
// For all objects but the receiver, check that the cache is empty.
diff --git a/src/ast.cc b/src/ast.cc
index adbad41..e7f70c5 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -125,7 +125,7 @@
value_(value),
pos_(pos),
binary_operation_(NULL),
- compound_load_id_(kNoNumber),
+ compound_load_id_(GetNextId(isolate)),
assignment_id_(GetNextId(isolate)),
block_start_(false),
block_end_(false),
diff --git a/src/ast.h b/src/ast.h
index e38def8..8c9606a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -37,7 +37,7 @@
#include "list-inl.h"
#include "runtime.h"
#include "small-pointer-list.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "token.h"
#include "utils.h"
#include "variables.h"
@@ -211,7 +211,7 @@
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const { return kInvalid; }
+ virtual Type node_type() const = 0;
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -220,9 +220,6 @@
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
- virtual Declaration* AsDeclaration() { return NULL; }
- virtual Statement* AsStatement() { return NULL; }
- virtual Expression* AsExpression() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
@@ -252,8 +249,6 @@
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
- virtual Statement* AsStatement() { return this; }
-
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -314,8 +309,6 @@
return 0;
}
- virtual Expression* AsExpression() { return this; }
-
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -365,8 +358,8 @@
test_id_(GetNextId(isolate)) {}
private:
- int id_;
- int test_id_;
+ const int id_;
+ const int test_id_;
};
@@ -408,8 +401,8 @@
ZoneStringList* labels_;
Type type_;
Label break_target_;
- int entry_id_;
- int exit_id_;
+ const int entry_id_;
+ const int exit_id_;
};
@@ -456,8 +449,6 @@
virtual InitializationFlag initialization() const = 0;
virtual bool IsInlineable() const;
- virtual Declaration* AsDeclaration() { return this; }
-
protected:
Declaration(VariableProxy* proxy,
VariableMode mode,
@@ -707,7 +698,7 @@
private:
Statement* body_;
Label continue_target_;
- int osr_entry_id_;
+ const int osr_entry_id_;
};
@@ -746,8 +737,8 @@
private:
Expression* cond_;
int condition_position_;
- int continue_id_;
- int back_edge_id_;
+ const int continue_id_;
+ const int back_edge_id_;
};
@@ -787,7 +778,7 @@
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
- int body_id_;
+ const int body_id_;
};
@@ -846,8 +837,8 @@
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
- int continue_id_;
- int body_id_;
+ const int continue_id_;
+ const int body_id_;
};
@@ -883,8 +874,8 @@
private:
Expression* each_;
Expression* enumerable_;
- int body_id_;
- int prepare_id_;
+ const int body_id_;
+ const int prepare_id_;
};
@@ -1018,8 +1009,8 @@
OBJECT_ONLY
};
CompareTypeFeedback compare_type_;
- int compare_id_;
- int entry_id_;
+ const int compare_id_;
+ const int entry_id_;
};
@@ -1088,9 +1079,9 @@
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
- int if_id_;
- int then_id_;
- int else_id_;
+ const int if_id_;
+ const int then_id_;
+ const int else_id_;
};
@@ -1107,6 +1098,7 @@
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
+ virtual Type node_type() const { return kInvalid; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1453,7 +1445,7 @@
private:
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
- int first_element_id_;
+ const int first_element_id_;
};
@@ -1625,7 +1617,7 @@
Handle<JSObject> holder_;
Handle<JSGlobalPropertyCell> cell_;
- int return_id_;
+ const int return_id_;
};
@@ -1666,7 +1658,7 @@
bool is_monomorphic_;
Handle<JSFunction> target_;
- int return_id_;
+ const int return_id_;
};
@@ -1726,13 +1718,9 @@
op_(op),
expression_(expression),
pos_(pos),
- materialize_true_id_(AstNode::kNoNumber),
- materialize_false_id_(AstNode::kNoNumber) {
+ materialize_true_id_(GetNextId(isolate)),
+ materialize_false_id_(GetNextId(isolate)) {
ASSERT(Token::IsUnaryOp(op));
- if (op == Token::NOT) {
- materialize_true_id_ = GetNextId(isolate);
- materialize_false_id_ = GetNextId(isolate);
- }
}
private:
@@ -1742,8 +1730,8 @@
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
- int materialize_true_id_;
- int materialize_false_id_;
+ const int materialize_true_id_;
+ const int materialize_false_id_;
};
@@ -1769,11 +1757,13 @@
Expression* left,
Expression* right,
int pos)
- : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
+ : Expression(isolate),
+ op_(op),
+ left_(left),
+ right_(right),
+ pos_(pos),
+ right_id_(GetNextId(isolate)) {
ASSERT(Token::IsBinaryOp(op));
- right_id_ = (op == Token::AND || op == Token::OR)
- ? GetNextId(isolate)
- : AstNode::kNoNumber;
}
private:
@@ -1781,9 +1771,9 @@
Expression* left_;
Expression* right_;
int pos_;
- // The short-circuit logical operations have an AST ID for their
+ // The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
- int right_id_;
+ const int right_id_;
};
@@ -1834,8 +1824,8 @@
bool is_monomorphic_;
Expression* expression_;
int pos_;
- int assignment_id_;
- int count_id_;
+ const int assignment_id_;
+ const int count_id_;
SmallMapList receiver_types_;
};
@@ -1925,8 +1915,8 @@
Expression* else_expression_;
int then_expression_position_;
int else_expression_position_;
- int then_id_;
- int else_id_;
+ const int then_id_;
+ const int else_id_;
};
@@ -1980,7 +1970,6 @@
if (is_compound()) {
binary_operation_ =
factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
- compound_load_id_ = GetNextId(isolate);
}
}
@@ -1990,8 +1979,8 @@
Expression* value_;
int pos_;
BinaryOperation* binary_operation_;
- int compound_load_id_;
- int assignment_id_;
+ const int compound_load_id_;
+ const int assignment_id_;
bool block_start_;
bool block_end_;
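
With node_type() now pure virtual, every instantiable AST node must provide it; only TargetCollector, which is never part of a finished AST, keeps an explicit kInvalid override (see the hunk above). The per-class override presumably comes from the DECLARE_NODE_FUNCTIONS macro, roughly (a sketch; the exact expansion is not shown in this section):

  #define DECLARE_NODE_FUNCTIONS(type)                            \
    virtual Type node_type() const { return AstNode::k##type; }   \
    virtual void Accept(AstVisitor* v);

The switch to const int AST ids likewise ensures each id is assigned exactly once, in the constructor, instead of being patched in later (compare the Assignment and UnaryOperation hunks above).
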
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 726670a..c72af5c 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -42,6 +42,7 @@
#include "snapshot.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
+#include "extensions/statistics-extension.h"
namespace v8 {
namespace internal {
@@ -95,6 +96,7 @@
extensions_cache_.Initialize(create_heap_objects);
GCExtension::Register();
ExternalizeStringExtension::Register();
+ StatisticsExtension::Register();
}
@@ -253,16 +255,16 @@
Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
- Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode);
+ void SetFunctionInstanceDescriptor(Handle<Map> map,
+ PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
Handle<Map> CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function);
- Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode propertyMode);
+ void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
+ PrototypePropertyMode propertyMode);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -381,55 +383,54 @@
}
-Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
+void Genesis::SetFunctionInstanceDescriptor(
+ Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
DescriptorArray::WhitenessWitness witness(*descriptors);
+ Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
+ Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
+ Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments));
+ Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller));
+ Handle<Foreign> prototype;
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
+ }
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
+ map->set_instance_descriptors(*descriptors);
+
{ // Add length.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
- CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add name.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
- CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments));
- CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->arguments_symbol(), *args, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller));
- CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
}
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
- CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ map->AppendDescriptor(&d, witness);
}
-
- descriptors->Sort(witness);
- return descriptors;
}
Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
+ SetFunctionInstanceDescriptor(map, prototype_mode);
map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
return map;
}
@@ -485,8 +486,6 @@
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
- object_function_map->set_instance_descriptors(
- heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -525,48 +524,48 @@
}
-Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
+void Genesis::SetStrictFunctionInstanceDescriptor(
+ Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
-
DescriptorArray::WhitenessWitness witness(*descriptors);
+ Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
+ Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
+ Handle<AccessorPair> arguments(factory()->NewAccessorPair());
+ Handle<AccessorPair> caller(factory()->NewAccessorPair());
+ Handle<Foreign> prototype;
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
+ }
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ map->set_instance_descriptors(*descriptors);
+
{ // Add length.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
- CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add name.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
- CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- Handle<AccessorPair> arguments(factory()->NewAccessorPair());
CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
- descriptors->Append(&d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- Handle<AccessorPair> caller(factory()->NewAccessorPair());
CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
- descriptors->Append(&d, witness);
+ map->AppendDescriptor(&d, witness);
}
-
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
}
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
- CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
- descriptors->Append(&d, witness);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ map->AppendDescriptor(&d, witness);
}
-
- descriptors->Sort(witness);
- return descriptors;
}
@@ -594,9 +593,7 @@
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeStrictFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
+ SetStrictFunctionInstanceDescriptor(map, prototype_mode);
map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
map->set_prototype(*empty_function);
return map;
@@ -869,19 +866,25 @@
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
- Handle<DescriptorArray> array_descriptors =
- factory->CopyAppendForeignDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewForeign(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+
+ Handle<DescriptorArray> array_descriptors(factory->NewDescriptorArray(1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ array_function->initial_map()->set_instance_descriptors(*array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
// array_function is used internally. JS code creating array object should
// search for the 'Array' property on the global object and use that one
// as the constructor. 'Array' property on a global object can be
// overwritten by JS code.
global_context()->set_array_function(*array_function);
- array_function->initial_map()->set_instance_descriptors(*array_descriptors);
}
{ // --- N u m b e r ---
@@ -908,19 +911,22 @@
string_fun->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kStringConstructCode));
global_context()->set_string_function(*string_fun);
- // Add 'length' property to strings.
- Handle<DescriptorArray> string_descriptors =
- factory->CopyAppendForeignDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewForeign(&Accessors::StringLength),
- static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE |
- READ_ONLY));
Handle<Map> string_map =
Handle<Map>(global_context()->string_function()->initial_map());
+ Handle<DescriptorArray> string_descriptors(factory->NewDescriptorArray(1));
+ DescriptorArray::WhitenessWitness witness(*string_descriptors);
+
+ Handle<Foreign> string_length(
+ factory->NewForeign(&Accessors::StringLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
string_map->set_instance_descriptors(*string_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
+ string_map->AppendDescriptor(&d, witness);
+ }
}
{ // --- D a t e ---
@@ -947,37 +953,39 @@
ASSERT_EQ(0, initial_map->inobject_properties());
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
- DescriptorArray::WhitenessWitness witness(*descriptors);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+ initial_map->set_instance_descriptors(*descriptors);
+
{
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_symbol(),
JSRegExp::kSourceFieldIndex,
final);
- descriptors->Append(&field, witness);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_symbol(),
JSRegExp::kGlobalFieldIndex,
final);
- descriptors->Append(&field, witness);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
final);
- descriptors->Append(&field, witness);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
final);
- descriptors->Append(&field, witness);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.5.
@@ -986,16 +994,14 @@
FieldDescriptor field(heap->last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
writable);
- descriptors->Append(&field, witness);
+ initial_map->AppendDescriptor(&field, witness);
}
- descriptors->Sort(witness);
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
- initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
@@ -1129,31 +1135,31 @@
caller->set_getter(*throw_function);
caller->set_setter(*throw_function);
+ // Create the map. Allocate one in-object field for length.
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
+ Heap::kArgumentsObjectSizeStrict);
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
DescriptorArray::WhitenessWitness witness(*descriptors);
+ map->set_instance_descriptors(*descriptors);
+
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
- descriptors->Append(&d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // callee
CallbacksDescriptor d(*factory->callee_symbol(),
*callee,
attributes);
- descriptors->Append(&d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // caller
CallbacksDescriptor d(*factory->caller_symbol(),
*caller,
attributes);
- descriptors->Append(&d, witness);
+ map->AppendDescriptor(&d, witness);
}
- descriptors->Sort(witness);
- // Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
- map->set_instance_descriptors(*descriptors);
map->set_function_with_prototype(true);
map->set_prototype(global_context()->object_function()->prototype());
map->set_pre_allocated_property_fields(1);
@@ -1472,116 +1478,132 @@
SetPrototype(script_fun, prototype);
global_context()->set_script_function(*script_fun);
- // Add 'source' and 'data' property to scripts.
- PropertyAttributes common_attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Foreign> foreign_source =
- factory()->NewForeign(&Accessors::ScriptSource);
- Handle<DescriptorArray> script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- factory()->empty_descriptor_array(),
- factory()->LookupAsciiSymbol("source"),
- foreign_source,
- common_attributes);
- Handle<Foreign> foreign_name =
- factory()->NewForeign(&Accessors::ScriptName);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("name"),
- foreign_name,
- common_attributes);
- Handle<Foreign> foreign_id = factory()->NewForeign(&Accessors::ScriptId);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("id"),
- foreign_id,
- common_attributes);
- Handle<Foreign> foreign_line_offset =
- factory()->NewForeign(&Accessors::ScriptLineOffset);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("line_offset"),
- foreign_line_offset,
- common_attributes);
- Handle<Foreign> foreign_column_offset =
- factory()->NewForeign(&Accessors::ScriptColumnOffset);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("column_offset"),
- foreign_column_offset,
- common_attributes);
- Handle<Foreign> foreign_data =
- factory()->NewForeign(&Accessors::ScriptData);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("data"),
- foreign_data,
- common_attributes);
- Handle<Foreign> foreign_type =
- factory()->NewForeign(&Accessors::ScriptType);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("type"),
- foreign_type,
- common_attributes);
- Handle<Foreign> foreign_compilation_type =
- factory()->NewForeign(&Accessors::ScriptCompilationType);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("compilation_type"),
- foreign_compilation_type,
- common_attributes);
- Handle<Foreign> foreign_line_ends =
- factory()->NewForeign(&Accessors::ScriptLineEnds);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("line_ends"),
- foreign_line_ends,
- common_attributes);
- Handle<Foreign> foreign_context_data =
- factory()->NewForeign(&Accessors::ScriptContextData);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("context_data"),
- foreign_context_data,
- common_attributes);
- Handle<Foreign> foreign_eval_from_script =
- factory()->NewForeign(&Accessors::ScriptEvalFromScript);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_script"),
- foreign_eval_from_script,
- common_attributes);
- Handle<Foreign> foreign_eval_from_script_position =
- factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_script_position"),
- foreign_eval_from_script_position,
- common_attributes);
- Handle<Foreign> foreign_eval_from_function_name =
- factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_function_name"),
- foreign_eval_from_function_name,
- common_attributes);
-
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+
+ Handle<DescriptorArray> script_descriptors(
+ factory()->NewDescriptorArray(13));
+ DescriptorArray::WhitenessWitness witness(*script_descriptors);
+
+ Handle<Foreign> script_source(
+ factory()->NewForeign(&Accessors::ScriptSource));
+ Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
+ Handle<String> id_symbol(factory()->LookupAsciiSymbol("id"));
+ Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
+ Handle<String> line_offset_symbol(
+ factory()->LookupAsciiSymbol("line_offset"));
+ Handle<Foreign> script_line_offset(
+ factory()->NewForeign(&Accessors::ScriptLineOffset));
+ Handle<String> column_offset_symbol(
+ factory()->LookupAsciiSymbol("column_offset"));
+ Handle<Foreign> script_column_offset(
+ factory()->NewForeign(&Accessors::ScriptColumnOffset));
+ Handle<String> data_symbol(factory()->LookupAsciiSymbol("data"));
+ Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
+ Handle<String> type_symbol(factory()->LookupAsciiSymbol("type"));
+ Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
+ Handle<String> compilation_type_symbol(
+ factory()->LookupAsciiSymbol("compilation_type"));
+ Handle<Foreign> script_compilation_type(
+ factory()->NewForeign(&Accessors::ScriptCompilationType));
+ Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends"));
+ Handle<Foreign> script_line_ends(
+ factory()->NewForeign(&Accessors::ScriptLineEnds));
+ Handle<String> context_data_symbol(
+ factory()->LookupAsciiSymbol("context_data"));
+ Handle<Foreign> script_context_data(
+ factory()->NewForeign(&Accessors::ScriptContextData));
+ Handle<String> eval_from_script_symbol(
+ factory()->LookupAsciiSymbol("eval_from_script"));
+ Handle<Foreign> script_eval_from_script(
+ factory()->NewForeign(&Accessors::ScriptEvalFromScript));
+ Handle<String> eval_from_script_position_symbol(
+ factory()->LookupAsciiSymbol("eval_from_script_position"));
+ Handle<Foreign> script_eval_from_script_position(
+ factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
+ Handle<String> eval_from_function_name_symbol(
+ factory()->LookupAsciiSymbol("eval_from_function_name"));
+ Handle<Foreign> script_eval_from_function_name(
+ factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
script_map->set_instance_descriptors(*script_descriptors);
+ {
+ CallbacksDescriptor d(
+ *factory()->source_symbol(), *script_source, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*factory()->name_symbol(), *script_name, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*id_symbol, *script_id, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*line_offset_symbol, *script_line_offset, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *column_offset_symbol, *script_column_offset, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*data_symbol, *script_data, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*type_symbol, *script_type, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *compilation_type_symbol, *script_compilation_type, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*line_ends_symbol, *script_line_ends, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *context_data_symbol, *script_context_data, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_script_symbol, *script_eval_from_script, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_script_position_symbol,
+ *script_eval_from_script_position,
+ attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_function_name_symbol,
+ *script_eval_from_function_name,
+ attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
// Allocate the empty script.
Handle<Script> script = factory()->NewScript(factory()->empty_string());
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -1637,15 +1659,20 @@
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
- Handle<DescriptorArray> array_descriptors =
- factory()->CopyAppendForeignDescriptor(
- factory()->empty_descriptor_array(),
- factory()->length_symbol(),
- factory()->NewForeign(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+ Handle<DescriptorArray> array_descriptors(factory()->NewDescriptorArray(1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
- array_function->initial_map()->set_instance_descriptors(
- *array_descriptors);
+ Handle<Foreign> array_length(factory()->NewForeign(
+ &Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ array_function->initial_map()->set_instance_descriptors(*array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(
+ *factory()->length_symbol(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
global_context()->set_internal_array_function(*array_function);
}
@@ -1734,34 +1761,37 @@
Handle<DescriptorArray> reresult_descriptors =
factory()->NewDescriptorArray(3);
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
+ initial_map->set_instance_descriptors(*reresult_descriptors);
- JSFunction* array_function = global_context()->array_function();
- Handle<DescriptorArray> array_descriptors(
- array_function->initial_map()->instance_descriptors());
- int old = array_descriptors->SearchWithCache(heap()->length_symbol());
- reresult_descriptors->CopyFrom(0, *array_descriptors, old, witness);
-
- reresult_descriptors->SetLastAdded(0);
-
+ {
+ JSFunction* array_function = global_context()->array_function();
+ Handle<DescriptorArray> array_descriptors(
+ array_function->initial_map()->instance_descriptors());
+ String* length = heap()->length_symbol();
+ int old = array_descriptors->SearchWithCache(length);
+ ASSERT(old != DescriptorArray::kNotFound);
+ CallbacksDescriptor desc(length,
+ array_descriptors->GetValue(old),
+ array_descriptors->GetDetails(old).attributes());
+ initial_map->AppendDescriptor(&desc, witness);
+ }
{
FieldDescriptor index_field(heap()->index_symbol(),
JSRegExpResult::kIndexIndex,
NONE);
- reresult_descriptors->Append(&index_field, witness);
+ initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_symbol(),
JSRegExpResult::kInputIndex,
NONE);
- reresult_descriptors->Append(&input_field, witness);
+ initial_map->AppendDescriptor(&input_field, witness);
}
- reresult_descriptors->Sort(witness);
initial_map->set_inobject_properties(2);
initial_map->set_pre_allocated_property_fields(2);
initial_map->set_unused_property_fields(0);
- initial_map->set_instance_descriptors(*reresult_descriptors);
global_context()->set_regexp_result_map(*initial_map);
}
@@ -1997,6 +2027,9 @@
if (FLAG_expose_externalize_string) {
InstallExtension("v8/externalize", &extension_states);
}
+ if (FLAG_track_gc_object_stats) {
+ InstallExtension("v8/statistics", &extension_states);
+ }
if (extensions == NULL) return true;
// Install required extensions
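
statistics-extension.cc itself (added to SConscript above) is outside this section; its Register() presumably follows the same pattern as the existing gc and externalize-string extensions (sketch):

  void StatisticsExtension::Register() {
    static StatisticsExtension statistics_extension;
    static v8::DeclareExtension declaration(&statistics_extension);
  }

Per the bootstrapper hunk above, the extension is installed only when --track-gc-object-stats is set, which is what makes the counters reachable from JavaScript.
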
diff --git a/src/builtins.h b/src/builtins.h
index 3ea3393..c0a850a 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -66,6 +66,8 @@
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -80,6 +82,8 @@
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
@@ -347,6 +351,8 @@
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
+ static void Generate_InRecompileQueue(MacroAssembler* masm);
+ static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
diff --git a/src/compiler.cc b/src/compiler.cc
index f93ce6b..998f1bc 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -207,6 +207,7 @@
}
status = compiler.OptimizeGraph();
if (status != OptimizingCompiler::SUCCEEDED) {
+ status = compiler.AbortOptimization();
return status != OptimizingCompiler::FAILED;
}
status = compiler.GenerateAndInstallCode();
@@ -340,17 +341,20 @@
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+ AssertNoAllocation no_gc;
+ NoHandleAllocation no_handles;
+
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
ASSERT(graph_ != NULL);
SmartArrayPointer<char> bailout_reason;
if (!graph_->Optimize(&bailout_reason)) {
if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
- return AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
} else {
chunk_ = LChunk::NewChunk(graph_);
if (chunk_ == NULL) {
- return AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
}
}
return SetLastStatus(SUCCEEDED);
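
Moving AbortOptimization out of OptimizeGraph (see the compiler.h hunk further down) makes the phase itself side-effect free with respect to bailout handling: OptimizeGraph only reports BAILED_OUT, and each call site decides whether to disable optimization and fall back. A condensed sketch of the resulting driver logic, assuming the names from this patch:

    OptimizingCompiler compiler(info);
    OptimizingCompiler::Status status = compiler.CreateGraph();
    if (status == OptimizingCompiler::SUCCEEDED) {
      status = compiler.OptimizeGraph();
    }
    if (status == OptimizingCompiler::SUCCEEDED) {
      status = compiler.GenerateAndInstallCode();
    } else {
      status = compiler.AbortOptimization();  // Bailout is now caller-owned.
    }
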
@@ -658,21 +662,90 @@
}
-bool Compiler::CompileLazy(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
-
- ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState state(isolate, COMPILER);
-
- PostponeInterruptsScope postpone(isolate);
-
+static bool InstallFullCode(CompilationInfo* info) {
+ // Update the shared function info with the compiled code and the
+ // scope info. Please note that the order of the shared function
+ // info initialization is important since set_scope_info might
+ // trigger a GC, causing the ASSERT below to be invalid if the code
+ // was flushed. By setting the code object last we avoid this.
Handle<SharedFunctionInfo> shared = info->shared_info();
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
+ Handle<Code> code = info->code();
+ Handle<JSFunction> function = info->closure();
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*scope_info);
+ shared->set_code(*code);
+ if (!function.is_null()) {
+ function->ReplaceCode(*code);
+ ASSERT(!function->IsOptimized());
+ }
+ // Set the expected number of properties for instances.
+ FunctionLiteral* lit = info->function();
+ int expected = lit->expected_property_count();
+ SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+ // Set the optimization hints after performing lazy compilation, as
+ // these are not set when the function is set up as a lazily
+ // compiled function.
+ shared->SetThisPropertyAssignmentsInfo(
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+
+ // Check that the function has compiled code.
+ ASSERT(shared->is_compiled());
+ shared->set_code_age(0);
+ shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+ shared->set_ast_node_count(lit->ast_node_count());
+
+ if (V8::UseCrankshaft() &&
+ !function.is_null() &&
+ !shared->optimization_disabled()) {
+ // If we're asked to always optimize, we compile the optimized
+ // version of the function right away - unless the debugger is
+ // active as it makes no sense to compile optimized code then.
+ if (FLAG_always_opt &&
+ !Isolate::Current()->DebuggerHasBreakPoints()) {
+ CompilationInfoWithZone optimized(function);
+ optimized.SetOptimizing(AstNode::kNoNumber);
+ return Compiler::CompileLazy(&optimized);
+ }
+ }
+ return true;
+}
+
+
+static void InstallCodeCommon(CompilationInfo* info) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<Code> code = info->code();
+ ASSERT(!code.is_null());
+
+ // Set optimizable to false if this is disallowed by the shared
+ // function info, e.g., we might have flushed the code and must
+ // reset this bit when lazy compiling the code again.
+ if (shared->optimization_disabled()) code->set_optimizable(false);
+
+ Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+}
+
+
+static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+ Handle<Code> code = info->code();
+ Handle<JSFunction> function = info->closure();
+ if (FLAG_cache_optimized_code && code->kind() == Code::OPTIMIZED_FUNCTION) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<FixedArray> literals(function->literals());
+ Handle<Context> global_context(function->context()->global_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(
+ shared, global_context, code, literals);
+ }
+}
+
+
+static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
if (FLAG_cache_optimized_code && info->IsOptimizing()) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<JSFunction> function = info->closure();
ASSERT(!function.is_null());
Handle<Context> global_context(function->context()->global_context());
@@ -688,6 +761,25 @@
return true;
}
}
+ return false;
+}
+
+
+bool Compiler::CompileLazy(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
+
+ ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
+
+ // The VM is in the COMPILER state until exiting this function.
+ VMState state(isolate, COMPILER);
+
+ PostponeInterruptsScope postpone(isolate);
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ int compiled_size = shared->end_position() - shared->start_position();
+ isolate->counters()->total_compile_size()->Increment(compiled_size);
+
+ if (InstallCodeFromOptimizedCodeMap(info)) return true;
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info, kNoParsingFlags)) {
@@ -707,78 +799,17 @@
isolate->StackOverflow();
}
} else {
- ASSERT(!info->code().is_null());
- Handle<Code> code = info->code();
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- Handle<JSFunction> function = info->closure();
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ InstallCodeCommon(info);
if (info->IsOptimizing()) {
+ Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty());
- function->ReplaceCode(*code);
- if (FLAG_cache_optimized_code &&
- code->kind() == Code::OPTIMIZED_FUNCTION) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<FixedArray> literals(function->literals());
- Handle<Context> global_context(function->context()->global_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(
- shared, global_context, code, literals);
- }
+ info->closure()->ReplaceCode(*code);
+ InsertCodeIntoOptimizedCodeMap(info);
+ return true;
} else {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->scope(), info->zone());
- shared->set_scope_info(*scope_info);
- shared->set_code(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_code_age(0);
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
- shared->set_dont_inline(lit->flags()->Contains(kDontInline));
- shared->set_dont_cache(lit->flags()->Contains(kDontCache));
- shared->set_ast_node_count(lit->ast_node_count());
-
- if (V8::UseCrankshaft()&&
- !function.is_null() &&
- !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
- CompilationInfoWithZone optimized(function);
- optimized.SetOptimizing(AstNode::kNoNumber);
- return CompileLazy(&optimized);
- }
- }
+ return InstallFullCode(info);
}
-
- return true;
}
}
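
The net effect of the hunk above is that CompileLazy shrinks to a driver over the four helpers introduced earlier. Condensed (a sketch, not verbatim):

    if (InstallCodeFromOptimizedCodeMap(info)) return true;  // Cache hit.
    if (ParserApi::Parse(info, kNoParsingFlags)) {
      // ... code generation and language-mode bookkeeping ...
      InstallCodeCommon(info);  // Logging plus the optimizable bit.
      if (info->IsOptimizing()) {
        info->closure()->ReplaceCode(*info->code());
        InsertCodeIntoOptimizedCodeMap(info);  // Populate the cache.
        return true;
      }
      return InstallFullCode(info);  // Unoptimized: update the shared info.
    }
    return false;  // Parse failure, e.g. stack overflow.
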
@@ -787,6 +818,91 @@
}
+void Compiler::RecompileParallel(Handle<JSFunction> closure) {
+ if (closure->IsInRecompileQueue()) return;
+ ASSERT(closure->IsMarkedForParallelRecompilation());
+
+ Isolate* isolate = closure->GetIsolate();
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** Compilation queue, will retry opting on next run.\n");
+ }
+ return;
+ }
+
+ SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
+ VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+ PostponeInterruptsScope postpone(isolate);
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ int compiled_size = shared->end_position() - shared->start_position();
+ isolate->counters()->total_compile_size()->Increment(compiled_size);
+ info->SetOptimizing(AstNode::kNoNumber);
+
+ {
+ CompilationHandleScope handle_scope(*info);
+
+ if (InstallCodeFromOptimizedCodeMap(*info)) return;
+
+ if (ParserApi::Parse(*info, kNoParsingFlags)) {
+ LanguageMode language_mode = info->function()->language_mode();
+ info->SetLanguageMode(language_mode);
+ shared->set_language_mode(language_mode);
+ info->SaveHandles();
+
+ if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
+ OptimizingCompiler* compiler =
+ new(info->zone()) OptimizingCompiler(*info);
+ OptimizingCompiler::Status status = compiler->CreateGraph();
+ if (status == OptimizingCompiler::SUCCEEDED) {
+ isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ shared->code()->set_profiler_ticks(0);
+ closure->ReplaceCode(isolate->builtins()->builtin(
+ Builtins::kInRecompileQueue));
+ info.Detach();
+ } else if (status == OptimizingCompiler::BAILED_OUT) {
+ isolate->clear_pending_exception();
+ InstallFullCode(*info);
+ }
+ }
+ }
+ }
+
+ if (isolate->has_pending_exception()) {
+ isolate->clear_pending_exception();
+ }
+}
+
+
+void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
+ SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+ // If Crankshaft succeeded, install the optimized code; otherwise
+ // install the unoptimized code.
+ OptimizingCompiler::Status status = optimizing_compiler->last_status();
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ status = optimizing_compiler->AbortOptimization();
+ } else {
+ status = optimizing_compiler->GenerateAndInstallCode();
+ ASSERT(status == OptimizingCompiler::SUCCEEDED ||
+ status == OptimizingCompiler::BAILED_OUT);
+ }
+
+ InstallCodeCommon(*info);
+ if (status == OptimizingCompiler::SUCCEEDED) {
+ Handle<Code> code = info->code();
+ ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
+ info->closure()->ReplaceCode(*code);
+ if (info->shared_info()->SearchOptimizedCodeMap(
+ info->closure()->context()->global_context()) == -1) {
+ InsertCodeIntoOptimizedCodeMap(*info);
+ }
+ } else {
+ info->SetCode(Handle<Code>(info->shared_info()->code()));
+ InstallFullCode(*info);
+ }
+}
+
+
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script) {
// Precondition: code has been parsed and scopes have been analyzed.
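
Read together with the execution.cc hunks below, these two functions define the parallel-recompilation handshake: RecompileParallel parses and builds the graph on the main thread, queues the OptimizingCompiler for the background thread, and parks the closure on the InRecompileQueue builtin; when the background OptimizeGraph pass finishes, the thread raises the CODE_READY interrupt, and the next stack-guard check installs the result through InstallOptimizedCode. The main-thread side, sketched:

    // Runtime profiler marks a hot function:
    Compiler::RecompileParallel(closure);  // Parse + CreateGraph, then enqueue.
    // Execution continues in unoptimized code; kInRecompileQueue tail-calls
    // the shared code until the optimized version is ready.
    // Later, on the stack-guard interrupt path (execution.cc hunk below):
    isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
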
diff --git a/src/compiler.h b/src/compiler.h
index 6df21de..5465a2d 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -39,7 +39,7 @@
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
-class CompilationInfo BASE_EMBEDDED {
+class CompilationInfo {
public:
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
@@ -180,6 +180,13 @@
deferred_handles_ = deferred_handles;
}
+ void SaveHandles() {
+ SaveHandle(&closure_);
+ SaveHandle(&shared_info_);
+ SaveHandle(&calling_context_);
+ SaveHandle(&script_);
+ }
+
private:
Isolate* isolate_;
@@ -268,6 +275,14 @@
DeferredHandles* deferred_handles_;
+ template<typename T>
+ void SaveHandle(Handle<T>* object) {
+ if (!object->is_null()) {
+ Handle<T> handle(*(*object));
+ *object = handle;
+ }
+ }
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
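
SaveHandles is what makes it safe to hand a CompilationInfo to another thread: each handle is re-boxed into whatever handle scope is current, the CompilationHandleScope in RecompileParallel, so the locations outlive the scopes that originally allocated them and can later be carried by DeferredHandles. The effect of one SaveHandle call, sketched:

    Handle<JSFunction> old_handle = closure_;    // Location in a dying scope.
    closure_ = Handle<JSFunction>(*old_handle);  // Fresh location, allocated
                                                 // in the current handle scope.
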
@@ -346,6 +361,12 @@
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
+ MUST_USE_RESULT Status AbortOptimization() {
+ info_->AbortOptimization();
+ info_->shared_info()->DisableOptimization();
+ return SetLastStatus(BAILED_OUT);
+ }
+
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
@@ -362,11 +383,6 @@
return last_status_;
}
void RecordOptimizationStats();
- MUST_USE_RESULT Status AbortOptimization() {
- info_->AbortOptimization();
- info_->shared_info()->DisableOptimization();
- return SetLastStatus(BAILED_OUT);
- }
struct Timer {
Timer(OptimizingCompiler* compiler, int64_t* location)
@@ -432,6 +448,8 @@
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
+ static void RecompileParallel(Handle<JSFunction> function);
+
// Compile a shared function info object (the function is possibly lazily
// compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
@@ -443,6 +461,8 @@
bool is_toplevel,
Handle<Script> script);
+ static void InstallOptimizedCode(OptimizingCompiler* info);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 77b260f..a98680f 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -459,16 +459,23 @@
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
- bool negative = false;
+ enum Sign {
+ NONE,
+ NEGATIVE,
+ POSITIVE
+ };
+
+ Sign sign = NONE;
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JunkStringValue();
+ sign = POSITIVE;
} else if (*current == '-') {
++current;
if (current == end) return JunkStringValue();
- negative = true;
+ sign = NEGATIVE;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -483,34 +490,34 @@
}
ASSERT(buffer_pos == 0);
- return negative ? -V8_INFINITY : V8_INFINITY;
+ return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
leading_zero = true;
// It could be a hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
- if (current == end || !isDigit(*current, 16)) {
+ if (current == end || !isDigit(*current, 16) || sign != NONE) {
return JunkStringValue(); // "0x".
}
return InternalStringToIntDouble<4>(unicode_cache,
current,
end,
- negative,
+ false,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
}
}
@@ -555,7 +562,7 @@
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
exponent--; // Move this 0 into the exponent.
}
}
@@ -647,7 +654,7 @@
return InternalStringToIntDouble<3>(unicode_cache,
buffer,
buffer + buffer_pos,
- negative,
+ sign == NEGATIVE,
allow_trailing_junk);
}
@@ -660,7 +667,7 @@
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return negative ? -converted : converted;
+ return (sign == NEGATIVE) ? -converted : converted;
}
} } // namespace v8::internal
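
The boolean `negative` becomes a three-valued Sign so the hex branch can tell whether an explicit sign preceded "0x": ES5.1's StringNumericLiteral grammar has no signed HexIntegerLiteral production, so "-0x10" must convert to NaN rather than -16. Assumed behaviour after this hunk, sketched against the internal entry point (not a test from the patch):

    double a = StringToDouble(unicode_cache, "0x10", ALLOW_HEX);   // 16.0
    double b = StringToDouble(unicode_cache, "-0x10", ALLOW_HEX);  // NaN (junk)
    double c = StringToDouble(unicode_cache, "-16", ALLOW_HEX);    // -16.0
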
diff --git a/src/d8.cc b/src/d8.cc
index 804b76c..c716c1c 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1211,7 +1211,7 @@
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters) {
+ if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
diff --git a/src/d8.h b/src/d8.h
index 5f356a6..9e88eca 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -31,7 +31,7 @@
#ifndef V8_SHARED
#include "allocation.h"
#include "hashmap.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "v8.h"
#else
#include "../include/v8.h"
diff --git a/src/execution.cc b/src/execution.cc
index 2613c20..512de4c 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -446,6 +446,25 @@
}
+void StackGuard::RequestCodeReadyEvent() {
+ ASSERT(FLAG_parallel_recompilation);
+ if (ExecutionAccess::TryLock(isolate_)) {
+ thread_local_.interrupt_flags_ |= CODE_READY;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+ ExecutionAccess::Unlock(isolate_);
+ }
+}
+
+
+bool StackGuard::IsCodeReadyEvent() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
+}
+
+
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
@@ -911,6 +930,17 @@
stack_guard->Continue(GC_REQUEST);
}
+ if (stack_guard->IsCodeReadyEvent()) {
+ ASSERT(FLAG_parallel_recompilation);
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** CODE_READY event received.\n");
+ }
+ stack_guard->Continue(CODE_READY);
+ }
+ if (!stack_guard->IsTerminateExecution()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+
isolate->counters()->stack_interrupts()->Increment();
// If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
if (FLAG_count_based_interrupts ||
diff --git a/src/execution.h b/src/execution.h
index 01e4b9d..9f5d9ff 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -42,7 +42,8 @@
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5,
- GC_REQUEST = 1 << 6
+ GC_REQUEST = 1 << 6,
+ CODE_READY = 1 << 7
};
@@ -195,6 +196,8 @@
void TerminateExecution();
bool IsRuntimeProfilerTick();
void RequestRuntimeProfilerTick();
+ bool IsCodeReadyEvent();
+ void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
bool IsDebugBreak();
void DebugBreak();
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
new file mode 100644
index 0000000..cb34737
--- /dev/null
+++ b/src/extensions/statistics-extension.cc
@@ -0,0 +1,110 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "statistics-extension.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const StatisticsExtension::kSource =
+ "native function getV8Statistics();";
+
+
+v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
+ return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
+}
+
+
+v8::Handle<v8::Value> StatisticsExtension::GetCounters(
+ const v8::Arguments& args) {
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ if (args.Length() > 0) { // GC if first argument evaluates to true.
+ if (args[0]->IsBoolean() && args[0]->ToBoolean()->Value()) {
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
+ }
+ }
+
+ Counters* counters = isolate->counters();
+ v8::Local<v8::Object> result = v8::Object::New();
+
+ StatsCounter* counter = NULL;
+
+#define ADD_COUNTER(name, caption) \
+ counter = counters->name(); \
+ if (counter->Enabled()) \
+ result->Set(v8::String::New(#name), \
+ v8::Number::New(*counter->GetInternalPointer()));
+
+ STATS_COUNTER_LIST_1(ADD_COUNTER)
+ STATS_COUNTER_LIST_2(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ counter = counters->count_of_##name(); \
+ if (counter->Enabled()) \
+ result->Set(v8::String::New("count_of_" #name), \
+ v8::Number::New(*counter->GetInternalPointer())); \
+ counter = counters->size_of_##name(); \
+ if (counter->Enabled()) \
+ result->Set(v8::String::New("size_of_" #name), \
+ v8::Number::New(*counter->GetInternalPointer()));
+
+ INSTANCE_TYPE_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ result->Set(v8::String::New("count_of_CODE_TYPE_" #name), \
+ v8::Number::New( \
+ *counters->count_of_CODE_TYPE_##name()->GetInternalPointer())); \
+ result->Set(v8::String::New("size_of_CODE_TYPE_" #name), \
+ v8::Number::New( \
+ *counters->size_of_CODE_TYPE_##name()->GetInternalPointer()));
+
+ CODE_KIND_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ result->Set(v8::String::New("count_of_FIXED_ARRAY_" #name), \
+ v8::Number::New( \
+ *counters->count_of_FIXED_ARRAY_##name()->GetInternalPointer())); \
+ result->Set(v8::String::New("size_of_FIXED_ARRAY_" #name), \
+ v8::Number::New( \
+ *counters->size_of_FIXED_ARRAY_##name()->GetInternalPointer()));
+
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+
+ return result;
+}
+
+
+void StatisticsExtension::Register() {
+ static StatisticsExtension statistics_extension;
+ static v8::DeclareExtension declaration(&statistics_extension);
+}
+
+} } // namespace v8::internal
diff --git a/src/extensions/statistics-extension.h b/src/extensions/statistics-extension.h
new file mode 100644
index 0000000..433c4cf
--- /dev/null
+++ b/src/extensions/statistics-extension.h
@@ -0,0 +1,49 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
+#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class StatisticsExtension : public v8::Extension {
+ public:
+ StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
+ static void Register();
+ private:
+ static const char* const kSource;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_
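
Register() uses the same static DeclareExtension pattern as the other extensions, so the extension is known by the name "v8/statistics" and the bootstrapper hunk near the top of this patch installs it automatically under --track-gc-object-stats. An embedder could also request it explicitly; a minimal sketch against the public API of this era, assuming Register() has run:

    const char* names[] = { "v8/statistics" };
    v8::ExtensionConfiguration config(1, names);
    v8::Persistent<v8::Context> context = v8::Context::New(&config);
    // Script code can then call getV8Statistics(true); passing true forces a
    // full GC first so the reported object stats are fresh.
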
diff --git a/src/factory.cc b/src/factory.cc
index 8340b46..25989ca 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -886,38 +886,15 @@
}
-MUST_USE_RESULT static inline MaybeObject* DoCopyAdd(
- DescriptorArray* array,
- String* key,
- Object* value,
- PropertyAttributes attributes) {
- CallbacksDescriptor desc(key, value, attributes);
- MaybeObject* obj = array->CopyAdd(&desc);
- return obj;
-}
-
-
-// Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(isolate(),
- DoCopyAdd(*array, *key, *value, attributes),
- DescriptorArray);
-}
-
-
Handle<String> Factory::SymbolFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->LookupSymbol(*value), String);
}
-Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors) {
+void Factory::CopyAppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ Handle<DescriptorArray> array(map->instance_descriptors());
v8::NeanderArray callbacks(descriptors);
int nof_callbacks = callbacks.length();
int descriptor_count = array->number_of_descriptors();
@@ -929,12 +906,13 @@
// Copy the descriptors from the array.
if (0 < descriptor_count) {
- result->SetLastAdded(array->LastAdded());
for (int i = 0; i < descriptor_count; i++) {
result->CopyFrom(i, *array, i, witness);
}
}
+ map->set_instance_descriptors(*result);
+
// Fill in new callback descriptors. Process the callbacks from
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
@@ -945,19 +923,19 @@
Handle<String> key =
SymbolFromString(Handle<String>(String::cast(entry->name())));
// Check if a descriptor with this name already exists before writing.
- if (LinearSearch(*result,
- EXPECT_UNSORTED,
- *key,
- result->NumberOfSetDescriptors()) ==
+ if (LinearSearch(*result, *key, map->NumberOfSetDescriptors()) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
- result->Append(&desc, witness);
+ map->AppendDescriptor(&desc, witness);
}
}
- int new_number_of_descriptors = result->NumberOfSetDescriptors();
- // Return the old descriptor array if there were no new elements.
- if (new_number_of_descriptors == descriptor_count) return array;
+ int new_number_of_descriptors = map->NumberOfSetDescriptors();
+ // Reinstall the original descriptor array if no new elements were added.
+ if (new_number_of_descriptors == descriptor_count) {
+ map->set_instance_descriptors(*array);
+ return;
+ }
// If duplicates were detected, allocate a result of the right size
// and transfer the elements.
@@ -967,12 +945,8 @@
for (int i = 0; i < new_number_of_descriptors; i++) {
new_result->CopyFrom(i, *result, i, witness);
}
- result = new_result;
+ map->set_instance_descriptors(*new_result);
}
-
- // Sort the result before returning.
- result->Sort(witness);
- return result;
}
@@ -1360,20 +1334,15 @@
result->shared()->DontAdaptArguments();
// Recursively copy parent templates' accessors, 'data' may be modified.
- Handle<DescriptorArray> array =
- Handle<DescriptorArray>(map->instance_descriptors());
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
- array = CopyAppendCallbackDescriptors(array, props);
+ CopyAppendCallbackDescriptors(map, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
- if (!array->IsEmpty()) {
- map->set_instance_descriptors(*array);
- }
ASSERT(result->shared()->IsApiFunction());
return result;
diff --git a/src/factory.h b/src/factory.h
index 3b7ead5..6b22140 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -385,12 +385,6 @@
Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code);
- Handle<DescriptorArray> CopyAppendForeignDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
Handle<String> NumberToString(Handle<Object> number);
Handle<String> Uint32ToString(uint32_t value);
@@ -502,9 +496,8 @@
Handle<String> name,
LanguageMode language_mode);
- Handle<DescriptorArray> CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors);
+ void CopyAppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors);
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 4ad73ad..fdb025b 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -152,7 +152,7 @@
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
-DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
+DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@@ -218,6 +218,12 @@
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
+DEFINE_bool(parallel_recompilation, false,
+ "optimize hot functions asynchronously on a separate thread")
+DEFINE_bool(trace_parallel_recompilation, false, "trace parallel recompilation")
+DEFINE_int(parallel_recompilation_queue_length, 2,
+ "the length of the parallel compilation queue")
+
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
@@ -409,6 +415,7 @@
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
diff --git a/src/flags.cc b/src/flags.cc
index 14c230a..bca0eff 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -31,7 +31,7 @@
#include "v8.h"
#include "platform.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "string-stream.h"
diff --git a/src/handles-inl.h b/src/handles-inl.h
index a5c81ce..1307986 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -149,25 +149,31 @@
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
+ Isolate* isolate = Isolate::Current();
v8::ImplementationUtilities::HandleScopeData* current =
- Isolate::Current()->handle_scope_data();
+ isolate->handle_scope_data();
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ // Shrink the current handle scope to make it impossible to do
+ // handle allocations without an explicit handle scope.
+ current->limit = current->next;
- level_ = current->level;
- current->level = 0;
+ level_ = current->level;
+ current->level = 0;
+ }
}
inline NoHandleAllocation::~NoHandleAllocation() {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- Isolate::Current()->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
+ if (active_) {
+ // Restore state in current handle scope to re-enable handle
+ // allocations.
+ v8::ImplementationUtilities::HandleScopeData* data =
+ Isolate::Current()->handle_scope_data();
+ ASSERT_EQ(0, data->level);
+ data->level = level_;
+ }
}
#endif
diff --git a/src/handles.h b/src/handles.h
index 6f3044c..b35693e 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -95,6 +95,7 @@
};
+class DeferredHandles;
class HandleScopeImplementer;
@@ -159,9 +160,11 @@
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
+ friend class v8::internal::DeferredHandles;
friend class v8::HandleScope;
friend class v8::internal::HandleScopeImplementer;
friend class v8::ImplementationUtilities;
+ friend class v8::internal::Isolate;
};
@@ -324,6 +327,7 @@
inline ~NoHandleAllocation();
private:
int level_;
+ bool active_;
#endif
};
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 9d79db2..958fe19 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -772,22 +772,30 @@
#ifdef DEBUG
AssertNoAllocation::AssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(false);
+ Isolate* isolate = ISOLATE;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ old_state_ = isolate->heap()->allow_allocation(false);
+ }
}
AssertNoAllocation::~AssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
+ if (active_) HEAP->allow_allocation(old_state_);
}
DisableAssertNoAllocation::DisableAssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(true);
+ Isolate* isolate = ISOLATE;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ old_state_ = isolate->heap()->allow_allocation(true);
+ }
}
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
+ if (active_) HEAP->allow_allocation(old_state_);
}
#else
diff --git a/src/heap.cc b/src/heap.cc
index 15fd430..b038c77 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -155,7 +155,8 @@
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
promotion_queue_(this),
configured_(false),
- chunks_queued_for_free_(NULL) {
+ chunks_queued_for_free_(NULL),
+ relocation_mutex_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -1199,6 +1200,7 @@
void Heap::Scavenge() {
+ RelocationLock relocation_lock(this);
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -2044,7 +2046,8 @@
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
- reinterpret_cast<Map*>(result)->set_bit_field3(0);
+ reinterpret_cast<Map*>(result)->set_bit_field3(
+ Map::LastAddedBits::encode(Map::kNoneAdded));
return result;
}
@@ -2053,9 +2056,8 @@
int instance_size,
ElementsKind elements_kind) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawMap();
+ if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
map->set_map_no_write_barrier(meta_map());
@@ -2072,7 +2074,7 @@
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- map->set_bit_field3(0);
+ map->set_bit_field3(Map::LastAddedBits::encode(Map::kNoneAdded));
map->set_elements_kind(elements_kind);
// If the map object is aligned fill the padding area with Smi 0 objects.
@@ -3824,21 +3826,18 @@
// suggested by the function.
int instance_size = fun->shared()->CalculateInstanceSize();
int in_object_properties = fun->shared()->CalculateInObjectProperties();
- Object* map_obj;
- { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
- if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
- }
+ Map* map;
+ MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
+ if (!maybe_map->To(&map)) return maybe_map;
// Fetch or allocate prototype.
Object* prototype;
if (fun->has_instance_prototype()) {
prototype = fun->instance_prototype();
} else {
- { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
+ MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
+ if (!maybe_prototype->To(&prototype)) return maybe_prototype;
}
- Map* map = Map::cast(map_obj);
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
@@ -3857,12 +3856,10 @@
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
- { MaybeObject* maybe_descriptors_obj =
- DescriptorArray::Allocate(count, DescriptorArray::MAY_BE_SHARED);
- if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
- return maybe_descriptors_obj;
- }
- }
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(count, DescriptorArray::MAY_BE_SHARED);
+ if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+
DescriptorArray::WhitenessWitness witness(descriptors);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
@@ -3870,7 +3867,7 @@
FieldDescriptor field(name, i, NONE, i + 1);
descriptors->Set(i, &field, witness);
}
- descriptors->SortUnchecked(witness);
+ descriptors->Sort(witness);
// The descriptors may contain duplicates because the compiler does not
// guarantee the uniqueness of property names (it would have required
@@ -3879,7 +3876,7 @@
if (HasDuplicates(descriptors)) {
fun->shared()->ForbidInlineConstructor();
} else {
- map->set_instance_descriptors(descriptors);
+ map->InitializeDescriptors(descriptors);
map->set_pre_allocated_property_fields(count);
map->set_unused_property_fields(in_object_properties - count);
}
@@ -5698,6 +5695,7 @@
// Iterate over local handles in handle scopes.
isolate_->handle_scope_implementer()->Iterate(v);
+ isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
@@ -6160,6 +6158,8 @@
store_buffer()->SetUp();
+ if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+
return true;
}
@@ -6245,6 +6245,8 @@
isolate_->memory_allocator()->TearDown();
+ delete relocation_mutex_;
+
#ifdef DEBUG
delete debug_utils_;
debug_utils_ = NULL;
@@ -7222,6 +7224,18 @@
static_cast<int>(object_sizes_last_time_[index]));
CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
+ counters->count_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
diff --git a/src/heap.h b/src/heap.h
index 64fd17d..b7caee7 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1601,7 +1601,10 @@
// another.
enum {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- OBJECT_STATS_COUNT = FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1
+ FIRST_FIXED_ARRAY_SUB_TYPE =
+ FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
+ OBJECT_STATS_COUNT =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
};
void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
@@ -1614,12 +1617,35 @@
ASSERT(sub_type <= Code::LAST_CODE_KIND);
object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
+ } else if (type == FIXED_ARRAY_TYPE) {
+ ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
}
}
}
void CheckpointObjectStats();
+ // We don't use a ScopedLock here since we want to lock the heap
+ // only when FLAG_parallel_recompilation is true.
+ class RelocationLock {
+ public:
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+ ~RelocationLock() {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Unlock();
+ }
+ }
+
+ private:
+ Heap* heap_;
+ };
+
private:
Heap();
@@ -2072,6 +2098,8 @@
MemoryChunk* chunks_queued_for_free_;
+ Mutex* relocation_mutex_;
+
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
@@ -2395,6 +2423,7 @@
#ifdef DEBUG
private:
bool old_state_;
+ bool active_;
#endif
};
@@ -2407,6 +2436,7 @@
#ifdef DEBUG
private:
bool old_state_;
+ bool active_;
#endif
};
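
RelocationLock serializes Heap::Scavenge (see the heap.cc hunk above) against the optimizer thread, which may be reading heap objects while new space is evacuated. It is deliberately not a ScopedLock: the mutex is only created when --parallel_recompilation is on, so the lock is free otherwise. Usage is a single stack object at the top of the scavenger:

    void Heap::Scavenge() {
      RelocationLock relocation_lock(this);  // No-op without the flag.
      // ... evacuate new space ...
    }
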
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index e0caba1..3feab4a 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1479,7 +1479,7 @@
if (has_int32_value_) {
stream->Add("%d ", int32_value_);
} else if (has_double_value_) {
- stream->Add("%lf ", FmtElm(double_value_));
+ stream->Add("%f ", FmtElm(double_value_));
} else {
handle()->ShortPrint(stream);
}
@@ -1803,7 +1803,8 @@
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
@@ -1828,7 +1829,8 @@
elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
}
@@ -1857,6 +1859,7 @@
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
+ key_load->key(),
key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
@@ -1917,7 +1920,8 @@
}
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index edd93f0..c2cb271 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -224,6 +224,16 @@
virtual Opcode opcode() const { return HValue::k##type; }
+#ifdef DEBUG
+#define ASSERT_ALLOCATION_DISABLED do { \
+ OptimizingCompilerThread* thread = \
+ ISOLATE->optimizing_compiler_thread(); \
+ ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
+ } while (0)
+#else
+#define ASSERT_ALLOCATION_DISABLED do {} while (0)
+#endif
+
class Range: public ZoneObject {
public:
Range()
@@ -1878,6 +1888,7 @@
class HFixedArrayBaseLength: public HUnaryOperation {
public:
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
+ set_type(HType::Smi());
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
@@ -2289,7 +2300,7 @@
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->IsAllocationAllowed());
+ ASSERT_ALLOCATION_DISABLED;
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
@@ -2535,7 +2546,7 @@
bool ToBoolean();
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
+ ASSERT_ALLOCATION_DISABLED;
intptr_t hash;
if (has_int32_value_) {
@@ -2747,17 +2758,31 @@
};
+enum BoundsCheckKeyMode {
+ DONT_ALLOW_SMI_KEY,
+ ALLOW_SMI_KEY
+};
+
+
class HBoundsCheck: public HTemplateInstruction<2> {
public:
- HBoundsCheck(HValue* index, HValue* length) {
+ HBoundsCheck(HValue* index, HValue* length,
+ BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY)
+ : key_mode_(key_mode) {
SetOperandAt(0, index);
SetOperandAt(1, length);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
+ virtual Representation RequiredInputRepresentation(int arg_index) {
+ if (index()->representation().IsTagged() &&
+ !index()->IsConstant() &&
+ key_mode_ == ALLOW_SMI_KEY) {
+ return Representation::Tagged();
+ } else {
+ return Representation::Integer32();
+ }
}
virtual void PrintDataTo(StringStream* stream);
@@ -2769,6 +2794,7 @@
protected:
virtual bool DataEquals(HValue* other) { return true; }
+ BoundsCheckKeyMode key_mode_;
};
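
ALLOW_SMI_KEY lets a bounds check consume its key while still tagged: keyed-access keys usually arrive as smis, and demanding Integer32 here would insert a separate untagging step in front of every check. With a tagged input the backend can, in effect, compare the two smis directly. Construction as wired up in the hydrogen.cc hunks below:

    checked_key = AddInstruction(
        new(zone()) HBoundsCheck(key, length, ALLOW_SMI_KEY));
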
@@ -3640,7 +3666,7 @@
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
+ ASSERT_ALLOCATION_DISABLED;
return reinterpret_cast<intptr_t>(*cell_);
}
@@ -4027,10 +4053,11 @@
};
class HLoadKeyedFastElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedFastElement(HValue* obj,
HValue* key,
+ HValue* dependency,
ElementsKind elements_kind = FAST_ELEMENTS)
: bit_field_(0) {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
@@ -4041,6 +4068,7 @@
}
SetOperandAt(0, obj);
SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
@@ -4048,6 +4076,7 @@
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
@@ -4064,9 +4093,9 @@
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
+ if (index == 0) return Representation::Tagged();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
virtual void PrintDataTo(StringStream* stream);
@@ -4096,17 +4125,19 @@
class HLoadKeyedFastDoubleElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedFastDoubleElement(
HValue* elements,
HValue* key,
+ HValue* dependency,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: index_offset_(0),
is_dehoisted_(false),
hole_check_mode_(hole_check_mode) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
set_representation(Representation::Double());
SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
@@ -4114,6 +4145,7 @@
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
@@ -4123,9 +4155,9 @@
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
+ if (index == 0) return Representation::Tagged();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
bool RequiresHoleCheck() {
@@ -4152,16 +4184,18 @@
class HLoadKeyedSpecializedArrayElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
+ HValue* dependency,
ElementsKind elements_kind)
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
set_representation(Representation::Double());
@@ -4177,15 +4211,15 @@
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32, but the base pointer
- // for the element load is a naked pointer.
- return index == 0
- ? Representation::External()
- : Representation::Integer32();
+ // The key is supposed to be Integer32.
+ if (index == 0) return Representation::External();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
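
The new third operand threads the guarding instruction (the map check or the elements-kind branch) through every keyed load as a pure value dependency: RequiredInputRepresentation answers Representation::None for it, so no code is generated, but GVN/LICM can no longer hoist the load above the check that establishes its safety. Wiring, as done in hydrogen.cc below (sketch):

    // The load now names its guard; hoisting it above 'mapcheck' would break
    // the use-before-def invariant, so it stays below the check.
    HInstruction* load = new(zone()) HLoadKeyedFastElement(
        elements, checked_key, mapcheck, elements_kind);
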
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 7d0e228..df4d8fc 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1711,7 +1711,10 @@
block_side_effects_(graph->blocks()->length(), graph->zone()),
loop_side_effects_(graph->blocks()->length(), graph->zone()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
- ASSERT(!info->isolate()->heap()->IsAllocationAllowed());
+#ifdef DEBUG
+ ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
+ !info->isolate()->heap()->IsAllocationAllowed());
+#endif
block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
graph_->zone());
loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
@@ -3018,7 +3021,6 @@
{
HPhase phase("H_Block building");
- CompilationHandleScope handle_scope(info());
current_block_ = graph()->entry_block();
Scope* scope = info()->scope();
@@ -3079,9 +3081,6 @@
}
bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
- NoHandleAllocation no_handles;
- AssertNoAllocation no_gc;
-
*bailout_reason = SmartArrayPointer<char>();
OrderBlocks();
AssignDominators();
@@ -4369,7 +4368,8 @@
HValue* key = AddInstruction(
new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
- environment()->ExpressionStackAt(0))); // Iteration index.
+ environment()->ExpressionStackAt(0), // Iteration index.
+ environment()->ExpressionStackAt(0)));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@@ -5709,6 +5709,7 @@
HValue* external_elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
@@ -5752,7 +5753,7 @@
} else {
ASSERT(val == NULL);
return new(zone()) HLoadKeyedSpecializedArrayElement(
- external_elements, checked_key, elements_kind);
+ external_elements, checked_key, dependency, elements_kind);
}
}
@@ -5760,6 +5761,7 @@
HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
+ HValue* load_dependency,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
@@ -5788,10 +5790,11 @@
OMIT_HOLE_CHECK :
PERFORM_HOLE_CHECK;
if (IsFastDoubleElementsKind(elements_kind)) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key,
+ load_dependency, mode);
} else { // Smi or Object elements.
return new(zone()) HLoadKeyedFastElement(elements, checked_key,
- elements_kind);
+ load_dependency, elements_kind);
}
}
@@ -5843,12 +5846,14 @@
HInstruction* checked_key = NULL;
if (map->has_external_array_elements()) {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
HLoadExternalArrayPointer* external_elements =
new(zone()) HLoadExternalArrayPointer(elements);
AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(external_elements, checked_key,
- val, map->elements_kind(), is_store);
+ return BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, mapcheck,
+ map->elements_kind(), is_store);
}
ASSERT(fast_smi_only_elements ||
fast_elements ||
@@ -5860,7 +5865,7 @@
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- return BuildFastElementAccess(elements, checked_key, val,
+ return BuildFastElementAccess(elements, checked_key, val, mapcheck,
map->elements_kind(), is_store);
}
@@ -6078,9 +6083,11 @@
HInstruction* length;
length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
HType::Smi()));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
if (!is_store) {
Push(access);
}
@@ -6093,9 +6100,11 @@
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -6104,7 +6113,8 @@
}
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind, is_store));
+ external_elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
}
*has_side_effects |= access->HasObservableSideEffects();
if (position != RelocInfo::kNoPosition) access->set_position(position);
@@ -6550,7 +6560,7 @@
bool HGraphBuilder::TryInline(CallKind call_kind,
Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
+ int arguments_count,
HValue* receiver,
int ast_id,
int return_id,
@@ -6712,7 +6722,7 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
- arguments->length(),
+ arguments_count,
function,
undefined,
call_kind,
@@ -6748,7 +6758,7 @@
HEnterInlined* enter_inlined =
new(zone()) HEnterInlined(target,
- arguments->length(),
+ arguments_count,
function,
call_kind,
function_state()->is_construct(),
@@ -6851,7 +6861,7 @@
return TryInline(call_kind,
expr->target(),
- expr->arguments(),
+ expr->arguments()->length(),
NULL,
expr->id(),
expr->ReturnId(),
@@ -6862,7 +6872,7 @@
bool HGraphBuilder::TryInlineConstruct(CallNew* expr, HValue* receiver) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
- expr->arguments(),
+ expr->arguments()->length(),
receiver,
expr->id(),
expr->ReturnId(),
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 2b0fc31..28f61c5 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -874,7 +874,8 @@
void* operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
}
- void operator delete(void* ptr) { }
+ void operator delete(void* pointer, Zone* zone) { }
+ void operator delete(void* pointer) { }
private:
// Type of a member function that generates inline code for a native function.
@@ -1031,7 +1032,7 @@
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
+ int arguments_count,
HValue* receiver,
int ast_id,
int return_id,
@@ -1099,11 +1100,13 @@
HValue* external_elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index be46ff2..5c209ed 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -74,6 +74,43 @@
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
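
For reference, the control flow of the new Generate_ParallelRecompile builtin can be sketched in self-contained pseudo-C++ (toy types only; the real builtin calls Runtime::kParallelRecompile and emits a generated tail call):

#include <cstdio>

struct Code { void Run() { std::printf("running unoptimized code\n"); } };
struct Function { Code* shared_code; };

static void Enqueue(Function* f) { /* hand f to the compiler thread */ }

// Enqueue for background recompilation, then keep executing the existing
// unoptimized code; the generated version tail-calls instead of returning.
static void ParallelRecompile(Function* f) {
  Enqueue(f);
  f->shared_code->Run();
}
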
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index fc49ceb..cdf965e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1094,7 +1094,7 @@
// We got a map in register eax. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kLastAddedOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index e4aaaf3..f3bcf1a 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2650,6 +2650,7 @@
__ mov(result,
BuildFastArrayOperand(instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
@@ -2676,6 +2677,7 @@
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
@@ -2686,6 +2688,7 @@
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -2696,11 +2699,15 @@
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
+ if (key_representation.IsTagged() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
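
The kSmiTagSize adjustment above works because a smi already stores its value shifted left by the tag size, so the tag can be folded into the operand scale whenever shift_size is at least 1. A minimal sketch of the identity (assuming a 1-bit smi tag, as on ia32):

#include <assert.h>
#include <stdint.h>

int main() {
  const int kSmiTagSize = 1;
  int32_t key = 7;
  int32_t tagged = key << kSmiTagSize;  // smi encoding of 7
  int shift_size = 2;                   // e.g. 4-byte elements
  // Scaling the tagged key by one shift less addresses the same element.
  assert((key << shift_size) == (tagged << (shift_size - kSmiTagSize)));
  return 0;
}
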
@@ -2722,11 +2729,19 @@
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
@@ -3679,11 +3694,19 @@
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
@@ -3730,6 +3753,7 @@
Operand operand = BuildFastArrayOperand(
instr->object(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -3771,6 +3795,7 @@
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -5295,7 +5320,7 @@
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ mov(result,
- FieldOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ test(result, result);
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 87b975c..42d383e 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -246,6 +246,7 @@
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index = 0);
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 55b9df2..457e637 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1900,7 +1900,8 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
@@ -1912,7 +1913,8 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@@ -1931,11 +1933,17 @@
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstant(instr->key());
+
LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure it
// has an environment.
@@ -1961,7 +1969,8 @@
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegister(instr->object());
LOperand* val = needs_write_barrier
@@ -1978,7 +1987,8 @@
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@@ -1999,10 +2009,10 @@
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* val = NULL;
if (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
@@ -2012,7 +2022,11 @@
} else {
val = UseRegister(instr->value());
}
-
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstant(instr->key());
return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
key,
val);
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index e05a936..ee9bf79 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -872,6 +872,7 @@
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -1277,6 +1278,19 @@
};
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return key_representation.IsTagged() &&
+ (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS);
+}
+
+
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
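
ExternalArrayOpRequiresTemp captures the one case the scale trick cannot handle: 1-byte element kinds have shift_size 0, so there is no scale bit left to absorb the smi tag and the key must be untagged explicitly, clobbering its register (hence UseTempRegister in the lithium builder). A toy illustration of the explicit untag:

#include <assert.h>
#include <stdint.h>

int main() {
  const int kSmiTagSize = 1;
  int32_t tagged = 7 << kSmiTagSize;      // smi-encoded key
  int32_t index = tagged >> kSmiTagSize;  // what __ SmiUntag(key) computes
  assert(index == 7);
  return 0;
}
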
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 237bfd9..1fa694f 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2901,7 +2901,7 @@
// Check that there is an enum cache in the non-empty instance
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
- mov(edx, FieldOperand(edx, DescriptorArray::kLastAddedOffset));
+ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(edx, call_runtime);
// For all objects but the receiver, check that the cache is empty.
diff --git a/src/ic.cc b/src/ic.cc
index 5b71e40..0389330 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1516,10 +1516,10 @@
case CONSTANT_FUNCTION:
return;
case TRANSITION: {
- Map* value = lookup->GetTransitionTarget();
- Handle<Map> transition(Map::cast(value));
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
+
DescriptorArray* target_descriptors = transition->instance_descriptors();
- int descriptor = target_descriptors->LastAdded();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
if (details.type() != FIELD || details.attributes() != NONE) return;
@@ -1980,9 +1980,9 @@
break;
case TRANSITION: {
Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
DescriptorArray* target_descriptors = transition->instance_descriptors();
- int descriptor = target_descriptors->LastAdded();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
if (details.type() == FIELD && details.attributes() == NONE) {
diff --git a/src/isolate.cc b/src/isolate.cc
index cb73c26..69ffc5e 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -477,6 +477,14 @@
Iterate(v, current_t);
}
+void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
+ for (DeferredHandles* deferred = deferred_handles_head_;
+ deferred != NULL;
+ deferred = deferred->next_) {
+ deferred->Iterate(visitor);
+ }
+}
+
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
// The ARM simulator has a separate JS stack. We therefore register
@@ -1484,7 +1492,9 @@
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
- context_exit_happened_(false) {
+ context_exit_happened_(false),
+ deferred_handles_head_(NULL),
+ optimizing_compiler_thread_(this) {
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@@ -1565,6 +1575,8 @@
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
+ if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+
if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
// We must stop the logger before we tear down other components.
@@ -1906,6 +1918,7 @@
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
+ if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
return true;
}
@@ -1989,6 +2002,36 @@
}
+void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
+ deferred->next_ = deferred_handles_head_;
+ if (deferred_handles_head_ != NULL) {
+ deferred_handles_head_->previous_ = deferred;
+ }
+ deferred_handles_head_ = deferred;
+}
+
+
+void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
+#ifdef DEBUG
+ // In debug mode assert that the linked list is well-formed.
+ DeferredHandles* deferred_iterator = deferred;
+ while (deferred_iterator->previous_ != NULL) {
+ deferred_iterator = deferred_iterator->previous_;
+ }
+ ASSERT(deferred_handles_head_ == deferred_iterator);
+#endif
+ if (deferred_handles_head_ == deferred) {
+ deferred_handles_head_ = deferred_handles_head_->next_;
+ }
+ if (deferred->next_ != NULL) {
+ deferred->next_->previous_ = deferred->previous_;
+ }
+ if (deferred->previous_ != NULL) {
+ deferred->previous_->next_ = deferred->next_;
+ }
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
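
Link/UnlinkDeferredHandles maintain an intrusive doubly-linked list whose head has a NULL previous_ pointer. A self-contained sketch of the same discipline on a toy node type:

#include <stddef.h>

struct Node { Node* next_; Node* previous_; };

static void Link(Node** head, Node* n) {
  n->previous_ = NULL;
  n->next_ = *head;
  if (*head != NULL) (*head)->previous_ = n;
  *head = n;
}

static void Unlink(Node** head, Node* n) {
  if (*head == n) *head = n->next_;                         // pop the head
  if (n->next_ != NULL) n->next_->previous_ = n->previous_;
  if (n->previous_ != NULL) n->previous_->next_ = n->next_;
}
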
diff --git a/src/isolate.h b/src/isolate.h
index 8b4e12b..c0618c8 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -41,6 +41,7 @@
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
+#include "optimizing-compiler-thread.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
@@ -1055,6 +1056,14 @@
date_cache_ = date_cache;
}
+ void IterateDeferredHandles(ObjectVisitor* visitor);
+ void LinkDeferredHandles(DeferredHandles* deferred_handles);
+ void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
+
+ OptimizingCompilerThread* optimizing_compiler_thread() {
+ return &optimizing_compiler_thread_;
+ }
+
private:
Isolate();
@@ -1278,8 +1287,13 @@
#undef ISOLATE_FIELD_OFFSET
#endif
+ DeferredHandles* deferred_handles_head_;
+ OptimizingCompilerThread optimizing_compiler_thread_;
+
friend class ExecutionAccess;
+ friend class HandleScopeImplementer;
friend class IsolateInitializer;
+ friend class OptimizingCompilerThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 4bf00aa..a62196e 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -944,7 +944,15 @@
table_.GetVisitor(map)(map, obj);
}
- template<int id>
+ static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
+ Map* map, HeapObject* obj);
+
+ static void ObjectStatsCountFixedArray(
+ FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type);
+
+ template<StaticMarkingVisitor::VisitorId id>
class ObjectStatsTracker {
public:
static inline void Visit(Map* map, HeapObject* obj);
@@ -1499,16 +1507,84 @@
};
-template<int id>
-void StaticMarkingVisitor::ObjectStatsTracker<id>::Visit(
- Map* map, HeapObject* obj) {
+void StaticMarkingVisitor::ObjectStatsCountFixedArray(
+ FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type) {
+ Heap* heap = fixed_array->map()->GetHeap();
+ if (fixed_array->map() != heap->fixed_cow_array_map() &&
+ fixed_array->map() != heap->fixed_double_array_map() &&
+ fixed_array != heap->empty_fixed_array()) {
+ if (fixed_array->IsDictionary()) {
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ dictionary_type,
+ fixed_array->Size());
+ } else {
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ fast_type,
+ fixed_array->Size());
+ }
+ }
+}
+
+
+void StaticMarkingVisitor::ObjectStatsVisitBase(
+ StaticVisitorBase::VisitorId id, Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
int object_size = obj->Size();
heap->RecordObjectStats(map->instance_type(), -1, object_size);
- non_count_table_.GetVisitorById(static_cast<VisitorId>(id))(map, obj);
+ non_count_table_.GetVisitorById(id)(map, obj);
+ if (obj->IsJSObject()) {
+ JSObject* object = JSObject::cast(obj);
+ ObjectStatsCountFixedArray(object->elements(),
+ DICTIONARY_ELEMENTS_SUB_TYPE,
+ FAST_ELEMENTS_SUB_TYPE);
+ ObjectStatsCountFixedArray(object->properties(),
+ DICTIONARY_PROPERTIES_SUB_TYPE,
+ FAST_PROPERTIES_SUB_TYPE);
+ }
}
+template<StaticMarkingVisitor::VisitorId id>
+void StaticMarkingVisitor::ObjectStatsTracker<id>::Visit(
+ Map* map, HeapObject* obj) {
+ ObjectStatsVisitBase(id, map, obj);
+}
+
+
+template<>
+class StaticMarkingVisitor::ObjectStatsTracker<
+ StaticMarkingVisitor::kVisitMap> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ Map* map_obj = Map::cast(obj);
+ ASSERT(map->instance_type() == MAP_TYPE);
+ DescriptorArray* array = map_obj->instance_descriptors();
+ if (array != heap->empty_descriptor_array()) {
+ int fixed_array_size = array->Size();
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->HasTransitionArray()) {
+ int fixed_array_size = map_obj->transitions()->Size();
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->code_cache() != heap->empty_fixed_array()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ MAP_CODE_CACHE_SUB_TYPE,
+ FixedArray::cast(map_obj->code_cache())->Size());
+ }
+ ObjectStatsVisitBase(kVisitMap, map, obj);
+ }
+};
+
+
template<>
class StaticMarkingVisitor::ObjectStatsTracker<
StaticMarkingVisitor::kVisitCode> {
@@ -1517,10 +1593,44 @@
Heap* heap = map->GetHeap();
int object_size = obj->Size();
ASSERT(map->instance_type() == CODE_TYPE);
- heap->RecordObjectStats(CODE_TYPE, -1, object_size);
heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
- non_count_table_.GetVisitorById(
- static_cast<VisitorId>(kVisitCode))(map, obj);
+ ObjectStatsVisitBase(kVisitCode, map, obj);
+ }
+};
+
+
+template<>
+class StaticMarkingVisitor::ObjectStatsTracker<
+ StaticMarkingVisitor::kVisitSharedFunctionInfo> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ if (sfi->scope_info() != heap->empty_fixed_array()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ SCOPE_INFO_SUB_TYPE,
+ FixedArray::cast(sfi->scope_info())->Size());
+ }
+ ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
+ }
+};
+
+
+template<>
+class StaticMarkingVisitor::ObjectStatsTracker<
+ StaticMarkingVisitor::kVisitFixedArray> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ if (fixed_array == heap->symbol_table()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TABLE_SUB_TYPE,
+ fixed_array->Size());
+ }
+ ObjectStatsVisitBase(kVisitFixedArray, map, obj);
}
};
@@ -1974,13 +2084,6 @@
if (!base_marker()->MarkObjectWithoutPush(transitions)) return;
Object** transitions_start = transitions->data_start();
- if (transitions->HasElementsTransition()) {
- mark_compact_collector()->RecordSlot(
- transitions_start,
- transitions->GetElementsTransitionSlot(),
- transitions->elements_transition());
- }
-
if (transitions->HasPrototypeTransitions()) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
@@ -2003,16 +2106,6 @@
}
-template <class T>
-void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
- Object** slot = HeapObject::RawField(accessors, offset);
- HeapObject* accessor = HeapObject::cast(*slot);
- if (accessor->IsMap()) return;
- mark_compact_collector()->RecordSlot(slot, slot, accessor);
- base_marker()->MarkObjectAndPush(accessor);
-}
-
-
// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
@@ -3309,6 +3402,8 @@
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ Heap::RelocationLock relocation_lock(heap());
+
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
diff --git a/src/mark-compact.h b/src/mark-compact.h
index c2a70fc..ed3def5 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -396,7 +396,6 @@
void MarkMapContents(Map* map);
void MarkDescriptorArray(DescriptorArray* descriptors);
void MarkTransitionArray(TransitionArray* transitions);
- void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
private:
BaseMarker* base_marker() {
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 7ae84fa..0eea32c 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -3533,23 +3533,23 @@
1,
1);
} else {
- if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+ ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
- // Register a0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ Push(cache_entry, a2, a3);
+ // a0: precalculated cache entry address.
+ // a2 and a3: parts of the double value.
+ // Preserve a0, a2 and a3 on the stack; they are restored after the call.
+ __ Push(a3, a2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ Pop(cache_entry, a2, a3);
+ __ Pop(a3, a2, cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 55b37b4..6f4869e 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1149,7 +1149,7 @@
// We got a map in register v0. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(v0, a1, a2);
- __ lw(a1, FieldMemOperand(a1, DescriptorArray::kLastAddedOffset));
+ __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumCacheOffset));
__ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 6885104..e924159 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -5170,7 +5170,7 @@
Register scratch = ToRegister(instr->scratch());
__ LoadInstanceDescriptors(map, result, scratch);
__ lw(result,
- FieldMemOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index a0fea77..a7673d1 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5339,7 +5339,7 @@
// Check that there is an enum cache in the non-empty instance
// descriptors (a3). This is the case if the next enumeration
// index field does not contain a smi.
- lw(a3, FieldMemOperand(a3, DescriptorArray::kLastAddedOffset));
+ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(a3, call_runtime);
// For all objects but the receiver, check that the cache is empty.
diff --git a/src/objects-inl.h b/src/objects-inl.h
index feca271..f4e0767 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1952,12 +1952,12 @@
// Perform a linear search in this fixed array. len is the number of entry
// indices that are valid.
template<typename T>
-int LinearSearch(T* array, SearchMode mode, String* name, int len) {
+int LinearSearch(T* array, String* name, int len) {
uint32_t hash = name->Hash();
for (int number = 0; number < len; number++) {
String* entry = array->GetKey(number);
uint32_t current_hash = entry->Hash();
- if (mode == EXPECT_SORTED && current_hash > hash) break;
+ if (current_hash > hash) break;
if (current_hash == hash && name->Equals(entry)) return number;
}
return T::kNotFound;
@@ -1975,7 +1975,7 @@
// Fast case: do linear search for small arrays.
const int kMaxElementsForLinearSearch = 8;
if (StringShape(name).IsSymbol() && nof < kMaxElementsForLinearSearch) {
- return LinearSearch(array, EXPECT_SORTED, name, nof);
+ return LinearSearch(array, name, nof);
}
// Slow case: perform binary search.
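
With the EXPECT_UNSORTED mode gone, LinearSearch can always stop at the first entry whose hash exceeds the target. The same early exit on a plain sorted hash array (in the real code a name comparison still disambiguates equal hashes):

#include <stdint.h>

static int Find(const uint32_t* hashes, int len, uint32_t hash) {
  for (int i = 0; i < len; i++) {
    if (hashes[i] > hash) break;  // sorted: the key cannot occur later
    if (hashes[i] == hash) return i;
  }
  return -1;  // kNotFound
}
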
@@ -2110,13 +2110,26 @@
}
-void DescriptorArray::Append(Descriptor* desc,
- const WhitenessWitness& witness) {
- int descriptor_number = NumberOfSetDescriptors();
+int DescriptorArray::Append(Descriptor* desc,
+ const WhitenessWitness& witness,
+ int number_of_set_descriptors) {
+ int descriptor_number = number_of_set_descriptors;
int enumeration_index = descriptor_number + 1;
desc->SetEnumerationIndex(enumeration_index);
+
+ uint32_t hash = desc->GetKey()->Hash();
+
+ for (; descriptor_number > 0; --descriptor_number) {
+ String* key = GetKey(descriptor_number - 1);
+ if (key->Hash() <= hash) break;
+ Object* value = GetValue(descriptor_number - 1);
+ PropertyDetails details = GetDetails(descriptor_number - 1);
+ Descriptor moved_descriptor(key, value, details);
+ Set(descriptor_number, &moved_descriptor, witness);
+ }
+
Set(descriptor_number, desc, witness);
- SetLastAdded(descriptor_number);
+ return descriptor_number;
}
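
The new Append is an insertion-sort step: descriptors with a larger hash slide one slot up and the new descriptor fills the gap, so the array stays sorted without a separate Sort pass. The same step on raw hashes:

#include <stdint.h>

// a[0..n) is sorted and has capacity for n + 1 entries.
static int Insert(uint32_t* a, int n, uint32_t hash) {
  int i = n;
  for (; i > 0 && a[i - 1] > hash; --i) a[i] = a[i - 1];
  a[i] = hash;
  return i;  // like Append, report where the new entry landed
}
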
@@ -2434,9 +2447,10 @@
}
-void SlicedString::set_parent(String* parent) {
+void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
ASSERT(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
}
@@ -2940,16 +2954,12 @@
void Map::set_function_with_prototype(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kFunctionWithPrototype));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kFunctionWithPrototype));
- }
+ set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
}
bool Map::function_with_prototype() {
- return ((1 << kFunctionWithPrototype) & bit_field3()) != 0;
+ return FunctionWithPrototype::decode(bit_field3());
}
@@ -2994,15 +3004,11 @@
void Map::set_is_shared(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kIsShared));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kIsShared));
- }
+ set_bit_field3(IsShared::update(bit_field3(), value));
}
bool Map::is_shared() {
- return ((1 << kIsShared) & bit_field3()) != 0;
+ return IsShared::decode(bit_field3());
}
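
Both accessors now go through the BitField helper instead of hand-written masks. Its assumed semantics, a T packed into bits [shift, shift + size) of a 32-bit word, can be sketched as:

#include <stdint.h>

template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | (static_cast<uint32_t>(value) << shift);
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

typedef BitFieldSketch<bool, 0, 1> IsSharedSketch;  // mirrors Map::IsShared
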
@@ -3488,6 +3494,40 @@
}
+void Map::InitializeDescriptors(DescriptorArray* descriptors) {
+ int len = descriptors->number_of_descriptors();
+ ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
+ SLOW_ASSERT(descriptors->IsSortedNoDuplicates());
+
+#ifdef DEBUG
+ bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
+ for (int i = 0; i < len; ++i) used_indices[i] = false;
+
+ // Ensure that all enumeration indexes between 1 and length occur uniquely in
+ // the descriptor array.
+ for (int i = 0; i < len; ++i) {
+ int enum_index = descriptors->GetDetails(i).index() -
+ PropertyDetails::kInitialIndex;
+ ASSERT(0 <= enum_index && enum_index < len);
+ ASSERT(!used_indices[enum_index]);
+ used_indices[enum_index] = true;
+ }
+#endif
+
+ set_instance_descriptors(descriptors);
+
+ for (int i = 0; i < len; ++i) {
+ if (descriptors->GetDetails(i).index() == len) {
+ SetLastAdded(i);
+ break;
+ }
+ }
+
+ ASSERT(len == 0 ||
+ len == descriptors->GetDetails(LastAdded()).index());
+}
+
+
SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
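
The scan at the end of InitializeDescriptors relies on the enumeration indices being a permutation of 1..len (checked in the DEBUG block): the slot whose index equals len holds the most recently added descriptor. The same scan on plain ints:

static int FindLastAdded(const int* enum_indices, int len) {
  for (int i = 0; i < len; i++) {
    if (enum_indices[i] == len) return i;  // newest descriptor
  }
  return -1;  // empty array: the kNoneAdded case
}
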
@@ -3507,6 +3547,14 @@
}
+void Map::AppendDescriptor(Descriptor* desc,
+ const DescriptorArray::WhitenessWitness& witness) {
+ DescriptorArray* descriptors = instance_descriptors();
+ int set_descriptors = NumberOfSetDescriptors();
+ int new_last_added = descriptors->Append(desc, witness, set_descriptors);
+ SetLastAdded(new_last_added);
+}
+
Object* Map::GetBackPointer() {
Object* object = READ_FIELD(this, kInstanceDescriptorsOrBackPointerOffset);
@@ -4202,6 +4250,18 @@
}
+bool JSFunction::IsMarkedForParallelRecompilation() {
+ return code() ==
+ GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
+}
+
+
+bool JSFunction::IsInRecompileQueue() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kInRecompileQueue);
+}
+
+
Code* JSFunction::code() {
return Code::cast(unchecked_code());
}
diff --git a/src/objects.cc b/src/objects.cc
index 2ce1451..6511843 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2771,9 +2771,9 @@
strict_mode);
case TRANSITION: {
Map* transition_map = result->GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
- int descriptor = descriptors->LastAdded();
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.type() == FIELD) {
@@ -2892,9 +2892,9 @@
return ConvertDescriptorToField(name, value, attributes);
case TRANSITION: {
Map* transition_map = result.GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
- int descriptor = descriptors->LastAdded();
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.type() == FIELD) {
@@ -3080,17 +3080,16 @@
// except for the code cache, which can contain some ics which can be
// applied to the shared map.
Object* fresh;
- { MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- int offset = Map::kCodeCacheOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
+ MaybeObject* maybe_fresh =
+ fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (maybe_fresh->ToObject(&fresh)) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ int offset = Map::kCodeCacheOffset + kPointerSize;
+ ASSERT(memcmp(Map::cast(fresh)->address() + offset,
+ Map::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
}
#endif
@@ -4496,7 +4495,7 @@
// If there is a transition, try to follow it.
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
- int descriptor_number = target->instance_descriptors()->LastAdded();
+ int descriptor_number = target->LastAdded();
ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name);
return TryAccessorTransition(
this, target, descriptor_number, component, accessor, attributes);
@@ -4711,14 +4710,14 @@
}
Map* result;
- { MaybeObject* maybe_result = RawCopy(new_instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = RawCopy(new_instance_size);
+ if (!maybe_result->To(&result)) return maybe_result;
if (mode != CLEAR_INOBJECT_PROPERTIES) {
result->set_inobject_properties(inobject_properties());
}
+ result->SetLastAdded(kNoneAdded);
result->set_code_cache(code_cache());
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
@@ -4750,12 +4749,20 @@
MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
String* name,
+ int last_added,
TransitionFlag flag) {
Map* result;
MaybeObject* maybe_result = CopyDropDescriptors();
if (!maybe_result->To(&result)) return maybe_result;
- result->set_instance_descriptors(descriptors);
+ if (last_added == kNoneAdded) {
+ ASSERT(descriptors->IsEmpty());
+ } else {
+ ASSERT(descriptors->GetDetails(last_added).index() ==
+ descriptors->number_of_descriptors());
+ result->set_instance_descriptors(descriptors);
+ result->SetLastAdded(last_added);
+ }
if (flag == INSERT_TRANSITION) {
TransitionArray* transitions;
@@ -4807,32 +4814,80 @@
// array describing these properties.
ASSERT(constructor()->IsJSFunction());
JSFunction* ctor = JSFunction::cast(constructor());
+ Map* initial_map = ctor->initial_map();
+ DescriptorArray* initial_descriptors = initial_map->instance_descriptors();
DescriptorArray* descriptors;
MaybeObject* maybe_descriptors =
- ctor->initial_map()->instance_descriptors()->Copy(
- DescriptorArray::MAY_BE_SHARED);
+ initial_descriptors->Copy(DescriptorArray::MAY_BE_SHARED);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
- return CopyReplaceDescriptors(descriptors, NULL, OMIT_TRANSITION);
+ int last_added = initial_descriptors->IsEmpty()
+ ? kNoneAdded
+ : initial_map->LastAdded();
+
+ return CopyReplaceDescriptors(descriptors, NULL, last_added, OMIT_TRANSITION);
}
MaybeObject* Map::Copy(DescriptorArray::SharedMode shared_mode) {
+ DescriptorArray* source_descriptors = instance_descriptors();
DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = instance_descriptors()->Copy(shared_mode);
+ MaybeObject* maybe_descriptors = source_descriptors->Copy(shared_mode);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
- return CopyReplaceDescriptors(descriptors, NULL, OMIT_TRANSITION);
+ int last_added = source_descriptors->IsEmpty() ? kNoneAdded : LastAdded();
+
+ return CopyReplaceDescriptors(descriptors, NULL, last_added, OMIT_TRANSITION);
+}
+
+
+static bool InsertionPointFound(String* key1, String* key2) {
+ return key1->Hash() > key2->Hash() || key1 == key2;
}
MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
TransitionFlag flag) {
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = instance_descriptors()->CopyAdd(descriptor);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+ DescriptorArray* descriptors = instance_descriptors();
- return CopyReplaceDescriptors(descriptors, descriptor->GetKey(), flag);
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ String* key = descriptor->GetKey();
+ ASSERT(descriptors->Search(key) == DescriptorArray::kNotFound);
+
+ int old_size = descriptors->number_of_descriptors();
+ int new_size = old_size + 1;
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(new_size, DescriptorArray::MAY_BE_SHARED);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ FixedArray::WhitenessWitness witness(new_descriptors);
+
+ // Copy the descriptors, inserting a descriptor.
+ int insertion_index = -1;
+ int to = 0;
+ for (int from = 0; from < old_size; ++from) {
+ if (insertion_index < 0 &&
+ InsertionPointFound(descriptors->GetKey(from), key)) {
+ insertion_index = to++;
+ }
+ new_descriptors->CopyFrom(to++, descriptors, from, witness);
+ }
+ if (insertion_index < 0) insertion_index = to++;
+
+ ASSERT(to == new_size);
+ ASSERT(new_size == descriptors->NextEnumerationIndex());
+
+ descriptor->SetEnumerationIndex(new_size);
+ new_descriptors->Set(insertion_index, descriptor, witness);
+
+ SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
+
+ return CopyReplaceDescriptors(new_descriptors, key, insertion_index, flag);
}
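
CopyAddDescriptor now inlines the old CopyAdd logic as a copy-with-insertion pass over a freshly allocated array, in contrast to the in-place slide in Append. On raw hashes (InsertionPointFound also treats an equal key as the insertion point, elided here):

#include <stdint.h>

// dst has capacity n + 1.
static void CopyInsert(const uint32_t* src, int n, uint32_t hash,
                       uint32_t* dst) {
  int insertion = -1;
  int to = 0;
  for (int from = 0; from < n; ++from) {
    if (insertion < 0 && src[from] > hash) insertion = to++;
    dst[to++] = src[from];
  }
  if (insertion < 0) insertion = to++;  // largest hash: append at the end
  dst[insertion] = hash;
}
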
@@ -4844,31 +4899,49 @@
MaybeObject* maybe_result = descriptor->KeyToSymbol();
if (maybe_result->IsFailure()) return maybe_result;
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors;
-
// We replace the key if it is already present.
int index = old_descriptors->SearchWithCache(descriptor->GetKey());
- if (index == DescriptorArray::kNotFound) {
- maybe_descriptors = old_descriptors->CopyAdd(descriptor);
- } else {
- maybe_descriptors = old_descriptors->CopyReplace(descriptor, index);
+ if (index != DescriptorArray::kNotFound) {
+ return CopyReplaceDescriptor(descriptor, index, flag);
}
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
-
- return CopyReplaceDescriptors(descriptors, descriptor->GetKey(), flag);
+ return CopyAddDescriptor(descriptor, flag);
}
MaybeObject* Map::CopyReplaceDescriptor(Descriptor* descriptor,
- int index,
+ int insertion_index,
TransitionFlag flag) {
- DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors =
- instance_descriptors()->CopyReplace(descriptor, index);
- if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+ DescriptorArray* descriptors = instance_descriptors();
+ int size = descriptors->number_of_descriptors();
+ ASSERT(0 <= insertion_index && insertion_index < size);
- return CopyReplaceDescriptors(descriptors, descriptor->GetKey(), flag);
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ String* key = descriptor->GetKey();
+ ASSERT(key == descriptors->GetKey(insertion_index));
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(size, DescriptorArray::MAY_BE_SHARED);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ FixedArray::WhitenessWitness witness(new_descriptors);
+
+ // Copy the descriptors, replacing a descriptor.
+ for (int index = 0; index < size; ++index) {
+ if (index == insertion_index) continue;
+ new_descriptors->CopyFrom(index, descriptors, index, witness);
+ }
+
+ descriptor->SetEnumerationIndex(
+ descriptors->GetDetails(insertion_index).index());
+ new_descriptors->Set(insertion_index, descriptor, witness);
+
+ SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
+
+ return CopyReplaceDescriptors(new_descriptors, key, LastAdded(), flag);
}
@@ -5655,7 +5728,7 @@
if (!maybe_array->To(&result)) return maybe_array;
}
- result->set(kLastAddedIndex, Smi::FromInt(kNoneAdded));
+ result->set(kEnumCacheIndex, Smi::FromInt(0));
result->set(kTransitionsIndex, Smi::FromInt(0));
return result;
}
@@ -5667,9 +5740,9 @@
ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
if (HasEnumCache()) {
- FixedArray::cast(get(kLastAddedIndex))->
+ FixedArray::cast(get(kEnumCacheIndex))->
set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(get(kLastAddedIndex))->
+ FixedArray::cast(get(kEnumCacheIndex))->
set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
} else {
if (IsEmpty()) return; // Do nothing for empty descriptor array.
@@ -5677,19 +5750,11 @@
set(kEnumCacheBridgeCacheIndex, new_cache);
FixedArray::cast(bridge_storage)->
set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- NoWriteBarrierSet(FixedArray::cast(bridge_storage),
- kEnumCacheBridgeLastAdded,
- get(kLastAddedIndex));
- set(kLastAddedIndex, bridge_storage);
+ set(kEnumCacheIndex, bridge_storage);
}
}
-static bool InsertionPointFound(String* key1, String* key2) {
- return key1->Hash() > key2->Hash() || key1 == key2;
-}
-
-
void DescriptorArray::CopyFrom(int dst_index,
DescriptorArray* src,
int src_index,
@@ -5700,79 +5765,6 @@
Set(dst_index, &desc, witness);
}
-MaybeObject* DescriptorArray::CopyReplace(Descriptor* descriptor,
- int insertion_index) {
- ASSERT(0 <= insertion_index && insertion_index < number_of_descriptors());
-
- // Ensure the key is a symbol.
- { MaybeObject* maybe_result = descriptor->KeyToSymbol();
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- int size = number_of_descriptors();
-
- DescriptorArray* new_descriptors;
- { MaybeObject* maybe_result = Allocate(size, MAY_BE_SHARED);
- if (!maybe_result->To(&new_descriptors)) return maybe_result;
- }
-
- FixedArray::WhitenessWitness witness(new_descriptors);
-
- // Copy the descriptors, replacing a descriptor.
- for (int index = 0; index < size; ++index) {
- if (index == insertion_index) continue;
- new_descriptors->CopyFrom(index, this, index, witness);
- }
-
- descriptor->SetEnumerationIndex(GetDetails(insertion_index).index());
- new_descriptors->Set(insertion_index, descriptor, witness);
- new_descriptors->SetLastAdded(LastAdded());
-
- SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
-
- return new_descriptors;
-}
-
-
-MaybeObject* DescriptorArray::CopyAdd(Descriptor* descriptor) {
- // Ensure the key is a symbol.
- MaybeObject* maybe_result = descriptor->KeyToSymbol();
- if (maybe_result->IsFailure()) return maybe_result;
-
- String* key = descriptor->GetKey();
- ASSERT(Search(key) == kNotFound);
-
- int new_size = number_of_descriptors() + 1;
-
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = Allocate(new_size, MAY_BE_SHARED);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- FixedArray::WhitenessWitness witness(new_descriptors);
-
- // Copy the descriptors, inserting a descriptor.
- int insertion_index = -1;
- int to = 0;
- for (int from = 0; from < number_of_descriptors(); ++from) {
- if (insertion_index < 0 && InsertionPointFound(GetKey(from), key)) {
- insertion_index = to++;
- }
- new_descriptors->CopyFrom(to++, this, from, witness);
- }
- if (insertion_index < 0) insertion_index = to++;
-
- ASSERT(to == new_descriptors->number_of_descriptors());
-
- ASSERT(new_size == NextEnumerationIndex());
- descriptor->SetEnumerationIndex(new_size);
- new_descriptors->Set(insertion_index, descriptor, witness);
- new_descriptors->SetLastAdded(insertion_index);
-
- SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
-
- return new_descriptors;
-}
-
MaybeObject* DescriptorArray::Copy(SharedMode shared_mode) {
// Allocate the new descriptor array.
@@ -5787,25 +5779,19 @@
for (int i = 0; i < number_of_descriptors; i++) {
new_descriptors->CopyFrom(i, this, i, witness);
}
- new_descriptors->SetLastAdded(LastAdded());
}
return new_descriptors;
}
+
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
// candidate. This would result in missing updates upon evacuation.
-void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
+void DescriptorArray::Sort(const WhitenessWitness& witness) {
// In-place heap sort.
int len = number_of_descriptors();
- // Nothing to sort.
- if (len == 0) return;
-
- ASSERT(LastAdded() == kNoneAdded ||
- GetDetails(LastAdded()).index() == number_of_descriptors());
-
// Bottom-up max-heap construction.
// Index of the last node with children
const int max_parent_index = (len / 2) - 1;
@@ -5852,36 +5838,6 @@
parent_index = child_index;
}
}
-
-#ifdef DEBUG
- // Ensure that all enumeration indexes between 1 and length occur uniquely in
- // the descriptor array.
- for (int i = 1; i <= len; ++i) {
- int j;
- for (j = 0; j < len; ++j) {
- if (GetDetails(j).index() == i) break;
- }
- ASSERT(j != len);
- for (j++; j < len; ++j) {
- ASSERT(GetDetails(j).index() != i);
- }
- }
-#endif
-
- for (int i = 0; i < len; ++i) {
- if (GetDetails(i).index() == len) {
- SetLastAdded(i);
- return;
- }
- }
-
- UNREACHABLE();
-}
-
-
-void DescriptorArray::Sort(const WhitenessWitness& witness) {
- SortUnchecked(witness);
- SLOW_ASSERT(IsSortedNoDuplicates());
}
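
Sort keeps the pre-existing in-place heap sort; only the post-sort LastAdded bookkeeping moved to Map::InitializeDescriptors. For orientation, the same algorithm on raw hashes:

#include <stdint.h>
#include <algorithm>

static void SiftDown(uint32_t* a, int parent, int len) {
  for (;;) {
    int child = 2 * parent + 1;
    if (child >= len) return;
    if (child + 1 < len && a[child + 1] > a[child]) child++;  // larger child
    if (a[parent] >= a[child]) return;
    std::swap(a[parent], a[child]);
    parent = child;
  }
}

static void HeapSort(uint32_t* a, int len) {
  for (int p = len / 2 - 1; p >= 0; --p) SiftDown(a, p, len);  // build heap
  for (int i = len - 1; i > 0; --i) {
    std::swap(a[0], a[i]);  // move current max to its final slot
    SiftDown(a, 0, i);      // restore the heap on the prefix
  }
}
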
@@ -7263,8 +7219,10 @@
instance_type() == other->instance_type() &&
bit_field() == other->bit_field() &&
bit_field2() == other->bit_field2() &&
- (bit_field3() & ~(1<<Map::kIsShared)) ==
- (other->bit_field3() & ~(1<<Map::kIsShared));
+ static_cast<uint32_t>(bit_field3()) ==
+ LastAddedBits::update(
+ IsShared::update(other->bit_field3(), true),
+ kNoneAdded);
}
@@ -7285,6 +7243,18 @@
ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
+void JSFunction::MarkForParallelRecompilation() {
+ ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
+ Builtins* builtins = GetIsolate()->builtins();
+ ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));
+
+ // Unlike MarkForLazyRecompilation, after queuing a function for
+ // recompilation on the compiler thread, we actually tail-call into
+ // the full code. We reset the profiler ticks here so that the
+ // function doesn't bother the runtime profiler too much.
+ shared()->code()->set_profiler_ticks(0);
+}
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
@@ -12477,6 +12447,24 @@
}
}
+ int inobject_props = obj->map()->inobject_properties();
+
+ // Allocate new map.
+ Map* new_map;
+ MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ if (instance_descriptor_length == 0) {
+ ASSERT_LE(unused_property_fields, inobject_props);
+ // Transform the object.
+ new_map->set_unused_property_fields(unused_property_fields);
+ obj->set_map(new_map);
+ obj->set_properties(heap->empty_fixed_array());
+ // Check that it really works.
+ ASSERT(obj->HasFastProperties());
+ return obj;
+ }
+
// Allocate the instance descriptor.
DescriptorArray* descriptors;
MaybeObject* maybe_descriptors =
@@ -12488,7 +12476,6 @@
FixedArray::WhitenessWitness witness(descriptors);
- int inobject_props = obj->map()->inobject_properties();
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
if (number_of_allocated_fields < 0) {
@@ -12553,13 +12540,9 @@
ASSERT(current_offset == number_of_fields);
descriptors->Sort(witness);
- // Allocate new map.
- Map* new_map;
- MaybeObject* maybe_new_map =
- obj->map()->CopyReplaceDescriptors(descriptors, NULL, OMIT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
new_map->set_unused_property_fields(unused_property_fields);
+ new_map->InitializeDescriptors(descriptors);
// Transform the object.
obj->set_map(new_map);
diff --git a/src/objects.h b/src/objects.h
index 6349c02..92c2bc5 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -33,7 +33,7 @@
#include "elements-kind.h"
#include "list.h"
#include "property-details.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
@@ -170,14 +170,6 @@
};
-// Indicates whether the search function should expect a sorted or an unsorted
-// array as input.
-enum SearchMode {
- EXPECT_SORTED,
- EXPECT_UNSORTED
-};
-
-
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
@@ -662,6 +654,25 @@
STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
+ V(FAST_ELEMENTS_SUB_TYPE) \
+ V(DICTIONARY_ELEMENTS_SUB_TYPE) \
+ V(FAST_PROPERTIES_SUB_TYPE) \
+ V(DICTIONARY_PROPERTIES_SUB_TYPE) \
+ V(MAP_CODE_CACHE_SUB_TYPE) \
+ V(SCOPE_INFO_SUB_TYPE) \
+ V(SYMBOL_TABLE_SUB_TYPE) \
+ V(DESCRIPTOR_ARRAY_SUB_TYPE) \
+ V(TRANSITION_ARRAY_SUB_TYPE)
+
+enum FixedArraySubInstanceType {
+#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
+#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
+};
+
+
enum CompareResult {
LESS = -1,
EQUAL = 0,
@@ -2480,43 +2491,20 @@
inline int number_of_entries() { return number_of_descriptors(); }
inline int NextEnumerationIndex() { return number_of_descriptors() + 1; }
- int LastAdded() {
- ASSERT(!IsEmpty());
- Object* obj = get(kLastAddedIndex);
- if (obj->IsSmi()) {
- return Smi::cast(obj)->value();
- } else {
- Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeLastAdded);
- return Smi::cast(index)->value();
- }
- }
-
- // Set index of the last added descriptor and flush any enum cache.
- void SetLastAdded(int index) {
- ASSERT(!IsEmpty() || index > 0);
- set(kLastAddedIndex, Smi::FromInt(index));
- }
-
- int NumberOfSetDescriptors() {
- ASSERT(!IsEmpty());
- if (LastAdded() == kNoneAdded) return 0;
- return GetDetails(LastAdded()).index();
- }
-
bool HasEnumCache() {
- return !IsEmpty() && !get(kLastAddedIndex)->IsSmi();
+ return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
}
Object* GetEnumCache() {
ASSERT(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kLastAddedIndex));
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
return bridge->get(kEnumCacheBridgeCacheIndex);
}
Object** GetEnumCacheSlot() {
ASSERT(HasEnumCache());
return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kLastAddedOffset);
+ kEnumCacheOffset);
}
Object** GetTransitionsSlot() {
@@ -2552,8 +2540,9 @@
// Append automatically sets the enumeration index. This should only be used
// to add descriptors in bulk at the end, followed by sorting the descriptor
// array.
- inline void Append(Descriptor* desc,
- const WhitenessWitness&);
+ inline int Append(Descriptor* desc,
+ const WhitenessWitness&,
+ int number_of_set_descriptors);
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
@@ -2562,16 +2551,6 @@
int src_index,
const WhitenessWitness&);
- // Copy the descriptor array, inserting new descriptor. Its enumeration index
- // is automatically set to the size of the descriptor array to which it was
- // added first.
- MUST_USE_RESULT MaybeObject* CopyAdd(Descriptor* descriptor);
-
- // Copy the descriptor array, replacing a descriptor. Its enumeration index is
- // kept.
- MUST_USE_RESULT MaybeObject* CopyReplace(Descriptor* descriptor,
- int insertion_index);
-
// Indicates whether a descriptor array may be shared by several maps.
enum SharedMode {
@@ -2584,11 +2563,6 @@
MUST_USE_RESULT MaybeObject* Copy(SharedMode shared_mode);
// Sort the instance descriptors by the hash codes of their keys.
- // Does not check for duplicates.
- void SortUnchecked(const WhitenessWitness&);
-
- // Sort the instance descriptors by the hash codes of their keys.
- // Checks the result for duplicates.
void Sort(const WhitenessWitness&);
// Search the instance descriptors for given name.
@@ -2612,31 +2586,25 @@
// Constant for denoting key was not found.
static const int kNotFound = -1;
- // Constant for denoting that the LastAdded field was not yet set.
- static const int kNoneAdded = -1;
-
static const int kBackPointerStorageIndex = 0;
- static const int kLastAddedIndex = 1;
+ static const int kEnumCacheIndex = 1;
static const int kTransitionsIndex = 2;
static const int kFirstIndex = 3;
// The length of the "bridge" to the enum cache.
- static const int kEnumCacheBridgeLength = 3;
- static const int kEnumCacheBridgeLastAdded = 0;
- static const int kEnumCacheBridgeCacheIndex = 1;
- static const int kEnumCacheBridgeIndicesCacheIndex = 2;
+ static const int kEnumCacheBridgeLength = 2;
+ static const int kEnumCacheBridgeCacheIndex = 0;
+ static const int kEnumCacheBridgeIndicesCacheIndex = 1;
// Layout description.
static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
- static const int kLastAddedOffset = kBackPointerStorageOffset +
+ static const int kEnumCacheOffset = kBackPointerStorageOffset +
kPointerSize;
- static const int kTransitionsOffset = kLastAddedOffset + kPointerSize;
+ static const int kTransitionsOffset = kEnumCacheOffset + kPointerSize;
static const int kFirstOffset = kTransitionsOffset + kPointerSize;
// Layout description for the bridge array.
- static const int kEnumCacheBridgeLastAddedOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheBridgeCacheOffset =
- kEnumCacheBridgeLastAddedOffset + kPointerSize;
+ static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
// Layout of descriptor.
static const int kDescriptorKey = 0;
@@ -2714,7 +2682,7 @@
template<typename T>
-inline int LinearSearch(T* array, SearchMode mode, String* name, int len);
+inline int LinearSearch(T* array, String* name, int len);
template<typename T>
@@ -4696,6 +4664,10 @@
inline int bit_field3();
inline void set_bit_field3(int value);
+ class IsShared: public BitField<bool, 0, 1> {};
+ class FunctionWithPrototype: public BitField<bool, 1, 1> {};
+ class LastAddedBits: public BitField<int, 2, 11> {};
+
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
@@ -4855,6 +4827,7 @@
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+ inline void InitializeDescriptors(DescriptorArray* descriptors);
// Should only be called to clear a descriptor array that was only used to
// store transitions and does not contain any live transitions anymore.
@@ -4919,11 +4892,28 @@
String* name,
LookupResult* result);
+ void SetLastAdded(int index) {
+ set_bit_field3(LastAddedBits::update(bit_field3(), index));
+ }
+
+ int LastAdded() {
+ return LastAddedBits::decode(bit_field3());
+ }
+
+ int NumberOfSetDescriptors() {
+ ASSERT(!instance_descriptors()->IsEmpty());
+ if (LastAdded() == kNoneAdded) return 0;
+ return instance_descriptors()->GetDetails(LastAdded()).index();
+ }
+
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
- DescriptorArray* descriptors, String* name, TransitionFlag flag);
+ DescriptorArray* descriptors,
+ String* name,
+ int last_added,
+ TransitionFlag flag);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
TransitionFlag flag);
MUST_USE_RESULT MaybeObject* CopyInsertDescriptor(Descriptor* descriptor,
@@ -4937,6 +4927,9 @@
MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
+ inline void AppendDescriptor(Descriptor* desc,
+ const DescriptorArray::WhitenessWitness&);
+
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
MUST_USE_RESULT MaybeObject* Copy(DescriptorArray::SharedMode shared_mode);
@@ -5048,6 +5041,9 @@
static const int kMaxPreAllocatedPropertyFields = 255;
+ // Constant for denoting that the LastAdded field was not yet set.
+ static const int kNoneAdded = LastAddedBits::kMax;
+
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
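
Note on the hunks above: the LastAdded slot moves out of the DescriptorArray and into eleven bits of the map's bit_field3 (the LastAddedBits declaration earlier in this diff), so the sentinel can no longer be -1; the all-ones field value doubles as "not yet set". A minimal sketch of the encoding, assuming BitField's kMax is the all-ones value for an 11-bit field (2047); update/decode/kMax are exactly the BitField members the patch itself relies on:

    // Sketch only; not part of this patch.
    int bits = 0;
    bits = Map::LastAddedBits::update(bits, Map::kNoneAdded);  // mark "not set"
    ASSERT(Map::LastAddedBits::decode(bits) == 2047);          // 11 bits, all ones
    bits = Map::LastAddedBits::update(bits, 3);                // descriptor 3 is last
    ASSERT(Map::LastAddedBits::decode(bits) == 3);
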
@@ -6001,6 +5997,7 @@
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
+ void MarkForParallelRecompilation();
// Helpers to compile this function. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
@@ -6015,6 +6012,11 @@
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
+ inline bool IsMarkedForParallelRecompilation();
+
+ // Tells whether or not the function is on the parallel
+ // recompilation queue.
+ inline bool IsInRecompileQueue();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -7595,7 +7597,8 @@
class SlicedString: public String {
public:
inline String* parent();
- inline void set_parent(String* parent);
+ inline void set_parent(String* parent,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int offset();
inline void set_offset(int offset);
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
new file mode 100644
index 0000000..06018dd
--- /dev/null
+++ b/src/optimizing-compiler-thread.cc
@@ -0,0 +1,127 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "optimizing-compiler-thread.h"
+
+#include "v8.h"
+
+#include "hydrogen.h"
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+
+void OptimizingCompilerThread::Run() {
+#ifdef DEBUG
+ thread_id_ = ThreadId::Current().ToInteger();
+#endif
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+
+ int64_t epoch = 0;
+ if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
+
+ while (true) {
+ input_queue_semaphore_->Wait();
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_->Signal();
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_total_ = OS::Ticks() - epoch;
+ }
+ return;
+ }
+
+ int64_t compiling_start = 0;
+ if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
+
+ Heap::RelocationLock relocation_lock(isolate_->heap());
+ OptimizingCompiler* optimizing_compiler = NULL;
+ input_queue_.Dequeue(&optimizing_compiler);
+ Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+
+ ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());
+
+ OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ ASSERT(status != OptimizingCompiler::FAILED);
+ // Prevent an unused-variable error in release mode.
+ USE(status);
+
+ output_queue_.Enqueue(optimizing_compiler);
+ isolate_->stack_guard()->RequestCodeReadyEvent();
+
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_compiling_ += OS::Ticks() - compiling_start;
+ }
+ }
+}
+
+
+void OptimizingCompilerThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ input_queue_semaphore_->Signal();
+ stop_semaphore_->Wait();
+
+ if (FLAG_trace_parallel_recompilation) {
+ double compile_time = static_cast<double>(time_spent_compiling_);
+ double total_time = static_cast<double>(time_spent_total_);
+ double percentage = (compile_time * 100) / total_time;
+ PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
+ }
+}
+
+
+void OptimizingCompilerThread::InstallOptimizedFunctions() {
+ HandleScope handle_scope(isolate_);
+ int functions_installed = 0;
+ while (!output_queue_.IsEmpty()) {
+ OptimizingCompiler* compiler = NULL;
+ output_queue_.Dequeue(&compiler);
+ Compiler::InstallOptimizedCode(compiler);
+ functions_installed++;
+ }
+ if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
+ PrintF(" ** Installed %d function(s).\n", functions_installed);
+ }
+}
+
+
+void OptimizingCompilerThread::QueueForOptimization(
+ OptimizingCompiler* optimizing_compiler) {
+ input_queue_.Enqueue(optimizing_compiler);
+ input_queue_semaphore_->Signal();
+}
+
+#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread() {
+ if (!FLAG_parallel_recompilation) return false;
+ return ThreadId::Current().ToInteger() == thread_id_;
+}
+#endif
+
+
+} } // namespace v8::internal
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
new file mode 100644
index 0000000..d562726
--- /dev/null
+++ b/src/optimizing-compiler-thread.h
@@ -0,0 +1,101 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
+#define V8_OPTIMIZING_COMPILER_THREAD_H_
+
+#include "atomicops.h"
+#include "platform.h"
+#include "flags.h"
+#include "unbound-queue.h"
+
+namespace v8 {
+namespace internal {
+
+class HGraphBuilder;
+class OptimizingCompiler;
+
+class OptimizingCompilerThread : public Thread {
+ public:
+  explicit OptimizingCompilerThread(Isolate* isolate) :
+ Thread("OptimizingCompilerThread"),
+ isolate_(isolate),
+ stop_semaphore_(OS::CreateSemaphore(0)),
+ input_queue_semaphore_(OS::CreateSemaphore(0)),
+ time_spent_compiling_(0),
+ time_spent_total_(0) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ }
+
+ void Run();
+ void Stop();
+ void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void InstallOptimizedFunctions();
+
+ inline bool IsQueueAvailable() {
+ // We don't need a barrier since we have a data dependency right
+ // after.
+ Atomic32 current_length = NoBarrier_Load(&queue_length_);
+
+ // This can be queried only from the execution thread.
+ ASSERT(!IsOptimizerThread());
+ // Since only the execution thread increments queue_length_ and
+ // only one thread can run inside an Isolate at one time, a direct
+    // read doesn't introduce a race -- queue_length_ may be decreased
+    // in the meantime, but not increased.
+ return (current_length < FLAG_parallel_recompilation_queue_length);
+ }
+
+#ifdef DEBUG
+ bool IsOptimizerThread();
+#endif
+
+ ~OptimizingCompilerThread() {
+ delete input_queue_semaphore_;
+ delete stop_semaphore_;
+ }
+
+ private:
+ Isolate* isolate_;
+ Semaphore* stop_semaphore_;
+ Semaphore* input_queue_semaphore_;
+ UnboundQueue<OptimizingCompiler*> input_queue_;
+ UnboundQueue<OptimizingCompiler*> output_queue_;
+ volatile AtomicWord stop_thread_;
+ volatile Atomic32 queue_length_;
+ int64_t time_spent_compiling_;
+ int64_t time_spent_total_;
+
+#ifdef DEBUG
+ int thread_id_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_OPTIMIZING_COMPILER_THREAD_H_
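
Together, the two new files give each isolate a single-producer, single-consumer recompilation pipeline: the execution thread feeds input_queue_, and the compiler thread drains it in Run() and fills output_queue_. A hedged sketch of the intended driving sequence on the execution thread; the surrounding isolate wiring is assumed, and only the OptimizingCompilerThread calls below appear in this patch:

    OptimizingCompilerThread thread(isolate);
    thread.Start();  // inherited from Thread; enters the loop in Run()
    if (thread.IsQueueAvailable()) {
      // Wakes the compiler thread via input_queue_semaphore_.
      thread.QueueForOptimization(optimizing_compiler);
    }
    // Later, still on the execution thread, once the code-ready event
    // requested through RequestCodeReadyEvent() has been serviced:
    thread.InstallOptimizedFunctions();
    thread.Stop();  // sets stop_thread_, then joins via stop_semaphore_
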
diff --git a/src/parser.cc b/src/parser.cc
index 5ec2857..5ee217c 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -567,14 +567,15 @@
FunctionLiteral* Parser::ParseProgram() {
ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
-
HistogramTimerScope timer(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
+ int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
source->TryFlatten();
+ FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
// Notice that the stream is destroyed at the end of the branch block.
// The last line of the blocks can't be moved outside, even though they're
@@ -582,12 +583,27 @@
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(info(), source, &zone_scope);
+ result = DoParseProgram(info(), source, &zone_scope);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(info(), source, &zone_scope);
+ result = DoParseProgram(info(), source, &zone_scope);
}
+
+ if (FLAG_trace_parse && result != NULL) {
+ double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ if (info()->is_eval()) {
+ PrintF("[parsing eval");
+ } else if (info()->script()->name()->IsString()) {
+ String* name = String::cast(info()->script()->name());
+ SmartArrayPointer<char> name_chars = name->ToCString();
+ PrintF("[parsing script: %s", *name_chars);
+ } else {
+ PrintF("[parsing script");
+ }
+ PrintF(" - took %0.3f ms]\n", ms);
+ }
+ return result;
}
@@ -668,24 +684,31 @@
HistogramTimerScope timer(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
-
+ int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
+
// Initialize parser state.
source->TryFlatten();
+ FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- FunctionLiteral* result = ParseLazy(&stream, &zone_scope);
- return result;
+ result = ParseLazy(&stream, &zone_scope);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- FunctionLiteral* result = ParseLazy(&stream, &zone_scope);
- return result;
+ result = ParseLazy(&stream, &zone_scope);
}
+
+ if (FLAG_trace_parse && result != NULL) {
+ double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ SmartArrayPointer<char> name_chars = result->name()->ToCString();
+ PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
+ }
+ return result;
}
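
With --trace-parse enabled, the PrintF calls added above (in ParseProgram and ParseLazy) emit one line per parsed unit. The script and function names below are made up; the format is the one in the code:

    [parsing script: benchmark.js - took 2.117 ms]
    [parsing eval - took 0.312 ms]
    [parsing function: fib - took 0.041 ms]
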
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 6c64350..02e146f 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -84,6 +84,7 @@
return gc_entry_;
case JS:
case COMPILER:
+ case PARALLEL_COMPILER_PROLOGUE:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index cdbc77a..1856359 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -151,15 +151,20 @@
PrintF("]\n");
}
- // The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
+ if (FLAG_parallel_recompilation) {
+ function->MarkForParallelRecompilation();
+ } else {
+ // The next call to the function will trigger optimization.
+ function->MarkForLazyRecompilation();
+ }
}
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation());
+ ASSERT(function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
@@ -278,7 +283,8 @@
if (shared_code->kind() != Code::FUNCTION) continue;
- if (function->IsMarkedForLazyRecompilation()) {
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation()) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
diff --git a/src/runtime.cc b/src/runtime.cc
index 54bb61a..f7bd0e4 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -54,7 +54,7 @@
#include "runtime-profiler.h"
#include "runtime.h"
#include "scopeinfo.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "string-search.h"
#include "stub-cache.h"
#include "v8threads.h"
@@ -2160,10 +2160,7 @@
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- MaybeObject* maybe_name =
- isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
- String* name;
- if (!maybe_name->To(&name)) return maybe_name;
+ String* name = isolate->heap()->prototype_symbol();
if (function->HasFastProperties()) {
// Construct a new field descriptor with updated attributes.
@@ -8292,6 +8289,14 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation);
+ Compiler::RecompileParallel(args.at<JSFunction>(0));
+ return *isolate->factory()->undefined_value();
+}
+
+
class ActivationsFinder : public ThreadVisitor {
public:
explicit ActivationsFinder(JSFunction* function)
@@ -8486,6 +8491,11 @@
return Smi::FromInt(4); // 4 == "never".
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (FLAG_parallel_recompilation) {
+ if (function->IsMarkedForLazyRecompilation()) {
+ return Smi::FromInt(5);
+ }
+ }
if (FLAG_always_opt) {
-    // We may have always opt, but that is more best-effort than a real
-    // promise, so we still say "no" if it is not optimized.
+    // We may have --always-opt enabled, but that is best-effort rather
+    // than a real promise, so we still say "no" if it is not optimized.
diff --git a/src/runtime.h b/src/runtime.h
index c5c8bcb..da78c26 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -86,6 +86,7 @@
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
+ F(ParallelRecompile, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
diff --git a/src/smart-array-pointer.h b/src/smart-pointers.h
similarity index 67%
rename from src/smart-array-pointer.h
rename to src/smart-pointers.h
index 00721c1..345c4d4 100644
--- a/src/smart-array-pointer.h
+++ b/src/smart-pointers.h
@@ -25,34 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_SMART_ARRAY_POINTER_H_
-#define V8_SMART_ARRAY_POINTER_H_
+#ifndef V8_SMART_POINTERS_H_
+#define V8_SMART_POINTERS_H_
namespace v8 {
namespace internal {
-// A 'scoped array pointer' that calls DeleteArray on its pointer when the
-// destructor is called.
-template<typename T>
-class SmartArrayPointer {
+template<typename Deallocator, typename T>
+class SmartPointerBase {
public:
// Default constructor. Constructs an empty scoped pointer.
- inline SmartArrayPointer() : p_(NULL) {}
+ inline SmartPointerBase() : p_(NULL) {}
// Constructs a scoped pointer from a plain one.
- explicit inline SmartArrayPointer(T* ptr) : p_(ptr) {}
+ explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
// Copy constructor removes the pointer from the original to avoid double
// freeing.
- inline SmartArrayPointer(const SmartArrayPointer<T>& rhs) : p_(rhs.p_) {
- const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+ inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
+ : p_(rhs.p_) {
+ const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
}
// When the destructor of the scoped pointer is executed the plain pointer
-  // is deleted using DeleteArray. This implies that you must allocate with
-  // NewArray.
+  // is deleted using the Deallocator policy. This implies that you must
+  // allocate with the allocator matching the policy.
- inline ~SmartArrayPointer() { if (p_) DeleteArray(p_); }
+ inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
inline T* operator->() const { return p_; }
@@ -81,10 +80,11 @@
-  // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
+  // Assignment requires an empty (NULL) smart pointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
- inline SmartArrayPointer& operator=(const SmartArrayPointer<T>& rhs) {
+ inline SmartPointerBase<Deallocator, T>& operator=(
+ const SmartPointerBase<Deallocator, T>& rhs) {
ASSERT(is_empty());
T* tmp = rhs.p_; // swap to handle self-assignment
- const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+ const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
p_ = tmp;
return *this;
}
@@ -95,6 +95,45 @@
T* p_;
};
+// A 'scoped array pointer' that calls DeleteArray on its pointer when the
+// destructor is called.
+
+template<typename T>
+struct ArrayDeallocator {
+ static void Delete(T* array) {
+ DeleteArray(array);
+ }
+};
+
+
+template<typename T>
+class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
+ public:
+ inline SmartArrayPointer() { }
+ explicit inline SmartArrayPointer(T* ptr)
+ : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
+ inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
+ : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
+};
+
+
+template<typename T>
+struct ObjectDeallocator {
+  static void Delete(T* object) {
+    Malloced::Delete(object);
+ }
+};
+
+template<typename T>
+class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
+ public:
+ inline SmartPointer() { }
+ explicit inline SmartPointer(T* ptr)
+ : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
+ inline SmartPointer(const SmartPointer<T>& rhs)
+ : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
+};
+
} } // namespace v8::internal
-#endif // V8_SMART_ARRAY_POINTER_H_
+#endif // V8_SMART_POINTERS_H_
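
The rename reflects the refactoring: SmartArrayPointer is now one instantiation of a policy-based SmartPointerBase, and any type exposing a static Delete(T*) can act as the Deallocator. For illustration only, a hypothetical third instantiation (not in this patch) for memory that must be released with free():

    // Hypothetical sketch; requires <cstdlib> for free().
    template<typename T>
    struct CFreeDeallocator {
      static void Delete(T* p) { free(p); }
    };

    template<typename T>
    class SmartCPointer : public SmartPointerBase<CFreeDeallocator<T>, T> {
     public:
      inline SmartCPointer() { }
      explicit inline SmartCPointer(T* ptr)
          : SmartPointerBase<CFreeDeallocator<T>, T>(ptr) { }
    };
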
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 0de9854..24ff8c2 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -161,18 +161,12 @@
PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
Map* map = GetTarget(transition_number);
DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = descriptors->LastAdded();
- ASSERT(descriptor != DescriptorArray::kNotFound);
+ int descriptor = map->LastAdded();
+ ASSERT(descriptor != Map::kNoneAdded);
return descriptors->GetDetails(descriptor);
}
-Object** TransitionArray::GetElementsTransitionSlot() {
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kElementsTransitionOffset);
-}
-
-
int TransitionArray::Search(String* name) {
return internal::Search(this, name);
}
diff --git a/src/transitions.cc b/src/transitions.cc
index f2e49ec..5dda18e 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -69,10 +69,8 @@
MaybeObject* TransitionArray::NewWith(String* name, Map* target) {
TransitionArray* result;
- { MaybeObject* maybe_array;
- maybe_array = TransitionArray::Allocate(1);
- if (!maybe_array->To(&result)) return maybe_array;
- }
+ MaybeObject* maybe_array = TransitionArray::Allocate(1);
+ if (!maybe_array->To(&result)) return maybe_array;
FixedArray::WhitenessWitness witness(result);
diff --git a/src/transitions.h b/src/transitions.h
index 7acb88f..bc661ac 100644
--- a/src/transitions.h
+++ b/src/transitions.h
@@ -61,7 +61,6 @@
inline void set_elements_transition(
Map* target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline Object** GetElementsTransitionSlot();
inline bool HasElementsTransition();
inline void ClearElementsTransition();
diff --git a/src/v8-counters.cc b/src/v8-counters.cc
index f17aacc..b5a40b5 100644
--- a/src/v8-counters.cc
+++ b/src/v8-counters.cc
@@ -71,6 +71,16 @@
CODE_KIND_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter count_of_FIXED_ARRAY_##name = { \
+ "c:" "V8.CountOf_FIXED_ARRAY-" #name, NULL, false }; \
+ count_of_FIXED_ARRAY_##name##_ = count_of_FIXED_ARRAY_##name; \
+ StatsCounter size_of_FIXED_ARRAY_##name = { \
+ "c:" "V8.SizeOf_FIXED_ARRAY-" #name, NULL, false }; \
+ size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
StatsCounter state_counters[] = {
#define COUNTER_NAME(name) \
{ "c:V8.State" #name, NULL, false },
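
Each entry of FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST is passed through the SC macro added above. For a hypothetical list entry FOO, the preprocessor would produce:

    StatsCounter count_of_FIXED_ARRAY_FOO = {
        "c:" "V8.CountOf_FIXED_ARRAY-" "FOO", NULL, false };
    count_of_FIXED_ARRAY_FOO_ = count_of_FIXED_ARRAY_FOO;
    StatsCounter size_of_FIXED_ARRAY_FOO = {
        "c:" "V8.SizeOf_FIXED_ARRAY-" "FOO", NULL, false };
    size_of_FIXED_ARRAY_FOO_ = size_of_FIXED_ARRAY_FOO;
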
diff --git a/src/v8-counters.h b/src/v8-counters.h
index cb789af..e230613 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -323,6 +323,14 @@
CODE_KIND_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter* count_of_FIXED_ARRAY_##name() \
+ { return &count_of_FIXED_ARRAY_##name##_; } \
+ StatsCounter* size_of_FIXED_ARRAY_##name() \
+ { return &size_of_FIXED_ARRAY_##name##_; }
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
enum Id {
#define RATE_ID(name, caption) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
@@ -341,6 +349,10 @@
kSizeOfCODE_TYPE_##name,
CODE_KIND_LIST(COUNTER_ID)
#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
+ kSizeOfFIXED_ARRAY__##name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
#define COUNTER_ID(name) k_##name,
STATE_TAG_LIST(COUNTER_ID)
#undef COUNTER_ID
@@ -380,6 +392,12 @@
CODE_KIND_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter size_of_FIXED_ARRAY_##name##_; \
+ StatsCounter count_of_FIXED_ARRAY_##name##_;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
enum {
#define COUNTER_ID(name) __##name,
STATE_TAG_LIST(COUNTER_ID)
diff --git a/src/v8.cc b/src/v8.cc
index 2910a07..eaff43c 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -106,13 +106,16 @@
if (!has_been_set_up_ || has_been_disposed_) return;
+ // The isolate has to be torn down before clearing the LOperand
+ // caches so that the optimizing compiler thread (if running)
+ // doesn't see an inconsistent view of the lithium instructions.
+ isolate->TearDown();
+ delete isolate;
+
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
RegisteredExtension::UnregisterAll();
- isolate->TearDown();
- delete isolate;
-
is_running_ = false;
has_been_disposed_ = true;
diff --git a/src/v8globals.h b/src/v8globals.h
index a295f8b..a95672d 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -359,11 +359,12 @@
// VMState object leaves a state by popping the current state from the
// stack.
-#define STATE_TAG_LIST(V) \
- V(JS) \
- V(GC) \
- V(COMPILER) \
- V(OTHER) \
+#define STATE_TAG_LIST(V) \
+ V(JS) \
+ V(GC) \
+ V(COMPILER) \
+ V(PARALLEL_COMPILER_PROLOGUE) \
+ V(OTHER) \
V(EXTERNAL)
enum StateTag {
diff --git a/src/version.cc b/src/version.cc
index 83fd1c1..03c020b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 12
-#define BUILD_NUMBER 14
+#define BUILD_NUMBER 15
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index c647e56..384940d 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -47,6 +47,8 @@
return "GC";
case COMPILER:
return "COMPILER";
+ case PARALLEL_COMPILER_PROLOGUE:
+ return "PARALLEL_COMPILER_PROLOGUE";
case OTHER:
return "OTHER";
case EXTERNAL:
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 89b9710..b3aa936 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -75,6 +75,7 @@
// Save old rsp, since we are going to modify the stack.
__ push(rbp);
__ pushfq();
+ __ push(rdi);
__ push(rcx);
__ push(rbx);
__ movq(rbp, rsp);
@@ -128,6 +129,7 @@
__ movq(rsp, rbp);
__ pop(rbx);
__ pop(rcx);
+ __ pop(rdi);
__ popfq();
__ pop(rbp);
__ ret(0);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 0af0a43..b19a3d0 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -73,6 +73,45 @@
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ movq(kScratchRegister,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
+ __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ jmp(kScratchRegister);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 2a752f2..3eb6493 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1111,7 +1111,7 @@
// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kLastAddedOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 461b632..3e688df 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2503,18 +2503,22 @@
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits.
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
// Load the result.
__ movq(result,
BuildFastArrayOperand(instr->elements(),
- instr->key(),
+ key,
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
@@ -2535,12 +2539,16 @@
void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2548,7 +2556,7 @@
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
@@ -2558,7 +2566,7 @@
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -2595,17 +2603,23 @@
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
@@ -3538,18 +3552,23 @@
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
}
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
@@ -3593,7 +3612,8 @@
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
- if (FLAG_debug_code) {
+ if (FLAG_debug_code &&
+ !instr->hydrogen()->index()->representation().IsTagged()) {
__ AbortIfNotZeroExtended(reg);
}
if (instr->index()->IsConstantOperand()) {
@@ -3601,7 +3621,8 @@
Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
} else {
Register reg2 = ToRegister(instr->index());
- if (FLAG_debug_code) {
+ if (FLAG_debug_code &&
+ !instr->hydrogen()->index()->representation().IsTagged()) {
__ AbortIfNotZeroExtended(reg2);
}
__ cmpq(reg, reg2);
@@ -3621,37 +3642,42 @@
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
Operand operand =
BuildFastArrayOperand(instr->object(),
- instr->key(),
+ key,
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
- __ movq(operand, value);
-
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ lea(key, operand);
+ Register key_reg(ToRegister(key));
+ __ lea(key_reg, operand);
+ __ movq(Operand(key_reg, 0), value);
__ RecordWrite(elements,
- key,
+ key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
+ } else {
+ __ movq(operand, value);
}
}
@@ -3659,6 +3685,17 @@
void LCodeGen::DoStoreKeyedFastDoubleElement(
LStoreKeyedFastDoubleElement* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -3675,18 +3712,11 @@
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
__ movsd(double_store_operand, value);
}
@@ -5007,7 +5037,7 @@
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ movq(result,
- FieldOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ movq(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
Condition cc = masm()->CheckSmi(result);
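
The pattern repeated through this file untags a tagged (smi) key in place before the 64-bit address computation; because the untagging destroys the tagged value, the lithium builder below allocates such keys with UseTempRegister. As a sketch of what SmiToInteger64 amounts to on x64, assuming the usual kSmiShift == 32 encoding with the 32-bit payload in the upper half of the word:

    // Sketch, not the literal macro-assembler source.
    void MacroAssembler::SmiToInteger64(Register dst, Register src) {
      if (!dst.is(src)) movq(dst, src);
      sar(dst, Immediate(kSmiShift));  // arithmetic shift preserves the sign
    }
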
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index ecacac0..24b991e 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1815,10 +1815,15 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastElement* result =
+ new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}
@@ -1827,9 +1832,13 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
@@ -1846,9 +1855,13 @@
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
@@ -1873,13 +1886,16 @@
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
+ bool clobbers_key = needs_write_barrier ||
+ instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastElement(obj, key, val);
@@ -1890,12 +1906,15 @@
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
@@ -1911,7 +1930,8 @@
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
@@ -1920,11 +1940,12 @@
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
-
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ key, val);
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index e7a0a08..a7ae588 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -857,6 +857,7 @@
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 4b8ca8f..e3c9a92 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4473,7 +4473,7 @@
// Check that there is an enum cache in the non-empty instance
// descriptors (rdx). This is the case if the next enumeration
// index field does not contain a smi.
- movq(rdx, FieldOperand(rdx, DescriptorArray::kLastAddedOffset));
+ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(rdx, call_runtime);
// For all objects but the receiver, check that the cache is empty.