Version 3.9.24
Activated count-based profiler for ARM.
Fixed use of proxies as f.prototype properties. (issue 2021)
Enabled snapshots on MIPS.
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@11125 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 53cb1db..2240ec0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2012-03-23: Version 3.9.24
+
+ Activated count-based profiler for ARM.
+
+ Fixed use of proxies as f.prototype properties. (issue 2021)
+
+ Enabled snapshots on MIPS.
+
+ Performance and stability improvements on all platforms.
+
+
2012-03-21: Version 3.9.23
Use correct arguments adaptation environment when inlining function
diff --git a/SConstruct b/SConstruct
index b57951a..34d0efc 100644
--- a/SConstruct
+++ b/SConstruct
@@ -218,9 +218,12 @@
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
+ 'fpu:on': {
+ 'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
+ }
},
'mipsabi:hardfloat': {
- 'CPPDEFINES': ['__mips_hard_float=1'],
+ 'CPPDEFINES': ['__mips_hard_float=1', 'CAN_USE_FPU_INSTRUCTIONS'],
}
},
'arch:x64': {
@@ -575,7 +578,10 @@
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
- 'LINKFLAGS': ['-mhard-float']
+ 'LINKFLAGS': ['-mhard-float'],
+ 'fpu:on': {
+ 'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
+ }
}
}
},
@@ -1146,6 +1152,11 @@
'default': 'on',
'help': 'use vfp3 instructions when building the snapshot [Arm only]'
},
+ 'fpu': {
+ 'values': ['on', 'off'],
+ 'default': 'on',
+ 'help': 'use fpu instructions when building the snapshot [MIPS only]'
+ },
}
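
The new 'fpu' option only toggles the CAN_USE_FPU_INSTRUCTIONS define. A minimal sketch of how such a build-time define typically interacts with a runtime feature probe; the helper names are illustrative and not from the V8 tree:

#include <cstdio>

static bool FpuAvailableAtRuntime() {
  // A real MIPS port would probe the CPU or OS here; assume "yes" for the sketch.
  return true;
}

static bool CanUseFpu() {
#ifdef CAN_USE_FPU_INSTRUCTIONS
  // The snapshot was built with FPU instructions baked in, so the feature
  // has to be treated as unconditionally available at runtime.
  return true;
#else
  return FpuAvailableAtRuntime();
#endif
}

int main() {
  std::printf("FPU in use: %d\n", CanUseFpu() ? 1 : 0);
  return 0;
}
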
diff --git a/src/api.cc b/src/api.cc
index c852b97..49a026b 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3064,8 +3064,11 @@
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
+ bool fast = Utils::OpenHandle(this)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
- return !result.is_null() && !result->IsUndefined();
+ if (result.is_null() || result->IsUndefined()) return false;
+ if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
+ return true;
}
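
The hunk above remembers the receiver's property mode around i::SetAccessor, since installing an accessor can normalize a fast-properties object. A toy model of that save/restore pattern; the types are stand-ins, not the V8 API:

#include <cassert>

struct Object {
  bool fast_properties = true;
  void Normalize() { fast_properties = false; }
  void TransformToFast() { fast_properties = true; }
};

static bool SetAccessor(Object* obj) {
  obj->Normalize();  // installing an accessor switches to slow properties
  return true;       // report success
}

int main() {
  Object obj;
  bool fast = obj.fast_properties;  // remembered before the call
  bool ok = SetAccessor(&obj);
  if (ok && fast) obj.TransformToFast();  // restore fast mode on success
  assert(obj.fast_properties);
  return 0;
}
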
@@ -3694,6 +3697,94 @@
}
+// Will fail with a negative answer if the recursion depth is too high.
+static int RecursivelySerializeToUtf8(i::String* string,
+ char* buffer,
+ int start,
+ int end,
+ int recursion_budget,
+ int32_t previous_character,
+ int32_t* last_character) {
+ int utf8_bytes = 0;
+ while (true) {
+ if (string->IsAsciiRepresentation()) {
+ i::String::WriteToFlat(string, buffer, start, end);
+ *last_character = unibrow::Utf16::kNoPreviousCharacter;
+ return utf8_bytes + end - start;
+ }
+ switch (i::StringShape(string).representation_tag()) {
+ case i::kExternalStringTag: {
+ const uint16_t* data = i::ExternalTwoByteString::cast(string)->
+ ExternalTwoByteStringGetData(0);
+ char* current = buffer;
+ for (int i = start; i < end; i++) {
+ uint16_t character = data[i];
+ current +=
+ unibrow::Utf8::Encode(current, character, previous_character);
+ previous_character = character;
+ }
+ *last_character = previous_character;
+ return static_cast<int>(utf8_bytes + current - buffer);
+ }
+ case i::kSeqStringTag: {
+ const uint16_t* data =
+ i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
+ char* current = buffer;
+ for (int i = start; i < end; i++) {
+ uint16_t character = data[i];
+ current +=
+ unibrow::Utf8::Encode(current, character, previous_character);
+ previous_character = character;
+ }
+ *last_character = previous_character;
+ return static_cast<int>(utf8_bytes + current - buffer);
+ }
+ case i::kSlicedStringTag: {
+ i::SlicedString* slice = i::SlicedString::cast(string);
+ unsigned offset = slice->offset();
+ string = slice->parent();
+ start += offset;
+ end += offset;
+ continue;
+ }
+ case i::kConsStringTag: {
+ i::ConsString* cons_string = i::ConsString::cast(string);
+ i::String* first = cons_string->first();
+ int boundary = first->length();
+ if (start >= boundary) {
+ // Only need RHS.
+ string = cons_string->second();
+ start -= boundary;
+ end -= boundary;
+ continue;
+ } else if (end <= boundary) {
+ // Only need LHS.
+ string = first;
+ } else {
+ if (recursion_budget == 0) return -1;
+ int extra_utf8_bytes =
+ RecursivelySerializeToUtf8(first,
+ buffer,
+ start,
+ boundary,
+ recursion_budget - 1,
+ previous_character,
+ &previous_character);
+ if (extra_utf8_bytes < 0) return extra_utf8_bytes;
+ buffer += extra_utf8_bytes;
+ utf8_bytes += extra_utf8_bytes;
+ string = cons_string->second();
+ start = 0;
+ end -= boundary;
+ }
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
bool String::MayContainNonAscii() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
@@ -3712,11 +3803,12 @@
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
if (capacity == -1) {
capacity = str->length() + 1;
- len = str->length();
+ len = string_length;
} else {
len = i::Min(capacity, str->length());
}
@@ -3729,6 +3821,42 @@
return len;
}
+ if (capacity == -1 || capacity / 3 >= string_length) {
+ int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
+ const int kMaxRecursion = 100;
+ int utf8_bytes =
+ RecursivelySerializeToUtf8(*str,
+ buffer,
+ 0,
+ string_length,
+ kMaxRecursion,
+ previous,
+ &previous);
+ if (utf8_bytes >= 0) {
+ // Success serializing with recursion.
+ if ((options & NO_NULL_TERMINATION) == 0 &&
+ (capacity > utf8_bytes || capacity == -1)) {
+ buffer[utf8_bytes++] = '\0';
+ }
+ if (nchars_ref != NULL) *nchars_ref = string_length;
+ return utf8_bytes;
+ }
+ FlattenString(str);
+    // Recurse once. This time around the string is flat and serializing
+    // with recursion will certainly succeed.
+ return WriteUtf8(buffer, capacity, nchars_ref, options);
+ } else if (capacity >= string_length) {
+ // First check that the buffer is large enough. If it is, then recurse
+ // once without a capacity limit, which will get into the other branch of
+ // this 'if'.
+ int utf8_bytes = i::Utf8Length(str);
+ if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
+ if (utf8_bytes <= capacity) {
+ return WriteUtf8(buffer, -1, nchars_ref, options);
+ }
+ }
+
+ // Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
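
The new fast path relies on a worst-case expansion bound: one UTF-16 code unit never needs more than three UTF-8 bytes (supplementary-plane characters cost four bytes but consume two code units), so capacity / 3 >= string_length guarantees the buffer is large enough. A small standalone check of that arithmetic, with illustrative values:

#include <cassert>
#include <cstdint>

// Worst-case UTF-8 size of a single UTF-16 code unit.
static int MaxUtf8BytesFor(uint16_t code_unit, bool is_surrogate_half) {
  if (code_unit < 0x80) return 1;   // ASCII
  if (code_unit < 0x800) return 2;  // two-byte sequence
  if (is_surrogate_half) return 2;  // a pair costs 4 bytes, i.e. 2 per unit
  return 3;                         // the rest of the BMP
}

int main() {
  const int string_length = 10;  // UTF-16 code units (illustrative)
  const int capacity = 30;       // output bytes available (illustrative)
  assert(capacity / 3 >= string_length);        // the fast path applies
  assert(MaxUtf8BytesFor(0xFFFF, false) == 3);  // worst single-unit case
  return 0;
}
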
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index dd8ffcd..d5db686 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -80,7 +80,7 @@
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return kPointerSize;
}
@@ -364,8 +364,14 @@
}
-void Assembler::set_target_at(Address constant_pool_entry,
- Address target) {
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_external_target_at(Address constant_pool_entry,
+ Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 85be7d6..ec28da4 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -139,7 +139,6 @@
}
-
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -258,6 +257,7 @@
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
+const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
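
The new kBlxIp constant folds the assembler's bit patterns into a single word. As a sanity check, composing the AL condition with the BLX-register pattern and ip (r12) reproduces the literal 0xe12fff3c that the deoptimizer previously hard-coded; the numeric values below are taken from the asserts in this patch:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t al = 0xe0000000;              // "always" condition field
  const uint32_t kBlxRegPattern = 0x012fff30;  // B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX
  const uint32_t ip_code = 12;                 // ip is r12 on ARM
  const uint32_t kBlxIp = al | kBlxRegPattern | ip_code;
  assert(kBlxIp == 0xe12fff3c);                // matches the old literal
  return 0;
}
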
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e1fa3cf..e2d5f59 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -604,6 +604,7 @@
extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
+extern const Instr kBlxIp;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
@@ -685,20 +686,18 @@
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address constant_pool_entry, Address target);
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address constant_pool_entry,
- Address target) {
- set_target_at(constant_pool_entry, target);
- }
+ Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
+ static const int kSpecialTargetSize = kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index d9a4d4b..7b2a3c4 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -108,6 +108,10 @@
}
+static const int32_t kBranchBeforeStackCheck = 0x2a000001;
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
@@ -118,10 +122,16 @@
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
- (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
@@ -155,13 +165,21 @@
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+4, cs);
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ patcher.masm()->b(+4, cs);
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
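
kBranchBeforeStackCheck and kBranchBeforeInterrupt are the raw encodings of b(+4, cs) and b(+16, pl) respectively. A sketch of that encoding, assuming the standard ARM branch layout (condition in bits 31-28, 0b1010 in bits 27-24, word offset in the low 24 bits):

#include <cassert>
#include <cstdint>

static uint32_t EncodeBranch(uint32_t cond, int offset_bytes) {
  return (cond << 28) | (0xAu << 24) |
         (static_cast<uint32_t>(offset_bytes >> 2) & 0x00FFFFFF);
}

int main() {
  const uint32_t kCondCS = 0x2;  // carry set
  const uint32_t kCondPL = 0x5;  // plus (non-negative)
  assert(EncodeBranch(kCondCS, 4) == 0x2a000001);   // kBranchBeforeStackCheck
  assert(EncodeBranch(kCondPL, 16) == 0x5a000004);  // kBranchBeforeInterrupt
  return 0;
}
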
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index ae8d822..0cbd46e 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -109,7 +110,9 @@
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
return 24;
}
@@ -132,32 +135,11 @@
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- // We can optionally optimize based on counters rather than statistical
- // sampling.
- if (info->ShouldSelfOptimize()) {
- if (FLAG_trace_opt_verbose) {
- PrintF("[adding self-optimization header to %s]\n",
- *info->function()->debug_name()->ToCString());
- }
- has_self_optimization_header_ = true;
- MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
- Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
- JSGlobalPropertyCell* cell;
- if (maybe_cell->To(&cell)) {
- __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- Handle<Code> compile_stub(
- isolate()->builtins()->builtin(Builtins::kLazyRecompile));
- __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq);
- ASSERT(masm_->pc_offset() == self_optimization_header_size());
- }
- }
-
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -336,20 +318,68 @@
}
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(r2, Operand(profiling_counter_));
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+ // Self-optimization is a one-off thing: if it fails, don't try again.
+ reset_value = Smi::kMaxValue;
+ }
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
+ __ mov(r2, Operand(profiling_counter_));
+ __ mov(r3, Operand(Smi::FromInt(reset_value)));
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
+
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -371,6 +401,32 @@
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else if (FLAG_weighted_back_edges) {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ __ push(r0);
+ if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(r2);
+ __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+ } else {
+ InterruptStub stub;
+ __ CallStub(&stub);
+ }
+ __ pop(r0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+ }
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -888,7 +944,7 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
@@ -1186,7 +1242,7 @@
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, mode);
+ CallIC(ic, mode);
}
@@ -1270,7 +1326,7 @@
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
break;
}
@@ -1481,7 +1537,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1749,7 +1805,7 @@
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1757,7 +1813,7 @@
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1784,7 +1840,7 @@
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1867,7 +1923,7 @@
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -1908,7 +1964,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1921,7 +1977,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
}
@@ -1938,7 +1994,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -2056,7 +2112,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2102,7 +2158,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2136,6 +2192,14 @@
}
}
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+ ic_total_count_++;
+ __ Call(code, rmode, ast_id);
+}
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@@ -2153,7 +2217,7 @@
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2186,7 +2250,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3783,7 +3847,7 @@
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3938,7 +4002,7 @@
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -4049,7 +4113,7 @@
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4081,7 +4145,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4098,7 +4162,7 @@
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4124,7 +4188,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic);
+ CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4307,7 +4371,7 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
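
Taken together, EmitProfilingCounterDecrement, EmitProfilingCounterReset and the weighted back edges implement a simple budget: every back edge subtracts a weight proportional to the loop body's code size, and the InterruptStub runs once the counter goes negative. A host-side sketch of that protocol; the budget value is a stand-in, not the real FLAG_interrupt_budget default:

#include <algorithm>
#include <cstdio>

const int kMaxBackEdgeWeight = 127;
const int kBackEdgeDistanceDivisor = 142;  // ARM value from this patch
const int kInterruptBudget = 6000;         // stand-in for FLAG_interrupt_budget

static int counter = kInterruptBudget;

static void OnBackEdge(int code_distance_bytes) {
  int weight = std::min(kMaxBackEdgeWeight,
                        std::max(1, code_distance_bytes / kBackEdgeDistanceDivisor));
  counter -= weight;
  if (counter < 0) {              // generated code branches on 'pl' to skip this
    std::puts("InterruptStub: profiler tick / interrupt check");
    counter = kInterruptBudget;   // EmitProfilingCounterReset
  }
}

int main() {
  for (int i = 0; i < 10000; i++) OnBackEdge(1000);  // hot loop simulation
  return 0;
}
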
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index cdc1947..f48e08e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1077,7 +1077,8 @@
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements(
+ current_block_->last_environment()->outer() != NULL));
}
@@ -2271,6 +2272,9 @@
undefined,
instr->call_kind(),
instr->is_construct());
+ if (instr->materializes_arguments()) {
+ inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+ }
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2278,10 +2282,21 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (instr->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LPop(argument_count);
+ argument_count_ -= argument_count;
+ }
+
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
- return NULL;
+
+ return pop;
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 62cde6e..6d799ca 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -179,7 +179,8 @@
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField) \
- V(WrapReceiver)
+ V(WrapReceiver) \
+ V(Pop)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -534,9 +535,15 @@
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
- LArgumentsElements() { }
+ explicit LArgumentsElements(bool from_inlined)
+ : from_inlined_(from_inlined) { }
+
+ bool from_inlined() const { return from_inlined_; }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+
+ private:
+ bool from_inlined_;
};
@@ -1378,6 +1385,19 @@
};
+class LPop: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LPop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Pop, "pop")
+
+ private:
+ int count_;
+};
+
+
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 3e286ef..c8f1e17 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2764,16 +2764,20 @@
Register scratch = scratch0();
Register result = ToRegister(instr->result());
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
- __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ if (instr->from_inlined()) {
+ __ add(result, sp, Operand(-2 * kPointerSize));
+ } else {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ mov(result, fp, LeaveCC, ne);
- __ mov(result, scratch, LeaveCC, eq);
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ mov(result, fp, LeaveCC, ne);
+ __ mov(result, scratch, LeaveCC, eq);
+ }
}
@@ -2907,6 +2911,11 @@
}
+void LCodeGen::DoPop(LPop* instr) {
+ __ Drop(instr->count());
+}
+
+
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -4607,34 +4616,51 @@
}
}
- // Copy elements backing store header.
- ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
+ // Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ ldr(r2, FieldMemOperand(source, i));
__ str(r2, FieldMemOperand(result, elements_offset + i));
}
- }
- // Copy elements backing store content.
- ASSERT(!has_elements || elements->IsFixedArray());
- int elements_length = has_elements ? elements->length() : 0;
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
+ // Copy elements backing store content.
+ int elements_length = has_elements ? elements->length() : 0;
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ // We only support little endian mode...
+ int32_t value_low = value & 0xFFFFFFFF;
+ int32_t value_high = value >> 32;
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(r2, Operand(value_low));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ mov(r2, Operand(value_high));
+ __ str(r2, FieldMemOperand(result, total_offset + 4));
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ add(r2, result, Operand(*offset));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ } else {
+ __ mov(r2, Operand(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ }
+ }
} else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
+ UNREACHABLE();
}
}
}
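
The FixedDoubleArray branch above materializes each double literal as two 32-bit stores, which is why it is little-endian only. A quick host-side check that the low/high split round-trips:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double d = 3.25;
  int64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // analogue of get_representation()
  int32_t value_low = static_cast<int32_t>(bits & 0xFFFFFFFF);
  int32_t value_high = static_cast<int32_t>(bits >> 32);
  int32_t words[2] = { value_low, value_high };  // little-endian word order
  double roundtrip;
  std::memcpy(&roundtrip, words, sizeof(roundtrip));
  assert(roundtrip == d);
  return 0;
}
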
diff --git a/src/d8.cc b/src/d8.cc
index ad35af6..45781cf 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1436,6 +1436,13 @@
}
if (!options.last_run) {
context.Dispose();
+#if !defined(V8_SHARED)
+ if (i::FLAG_send_idle_notification) {
+ const int kLongIdlePauseInMs = 1000;
+ V8::ContextDisposedNotification();
+ V8::IdleNotification(kLongIdlePauseInMs);
+ }
+#endif // !V8_SHARED
}
#ifndef V8_SHARED
@@ -1490,6 +1497,7 @@
int stress_runs = i::FLAG_stress_runs;
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Run %d/%d ============\n", i + 1, stress_runs);
+ options.last_run = (i == stress_runs - 1);
result = RunMain(argc, argv);
}
#endif
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index eeaa95e..0668add 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -350,6 +350,9 @@
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
+
+DEFINE_bool(send_idle_notification, false,
+            "Send idle notification between stress runs.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
diff --git a/src/heap.cc b/src/heap.cc
index d298668..a1cccf6 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -92,6 +92,7 @@
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
+ global_ic_age_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -3393,6 +3394,7 @@
code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
+ code->set_ic_age(global_ic_age_);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -4838,13 +4840,21 @@
bool Heap::IdleNotification(int hint) {
- intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
+ const int kMaxHint = 1000;
+ intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
// The size factor is in range [3..100].
intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
+ if (hint >= kMaxHint) {
+      // The embedder is requesting a lot of GC work after context disposal;
+      // age inline caches so that they don't keep objects from
+ // the old context alive.
+ AgeInlineCaches();
+ }
int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
- if (hint >= mark_sweep_time && !FLAG_expose_gc) {
+ if (hint >= mark_sweep_time && !FLAG_expose_gc &&
+ incremental_marking()->IsStopped()) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: contexts disposed");
@@ -4859,7 +4869,7 @@
return false;
}
- if (hint >= 1000 || !FLAG_incremental_marking ||
+ if (hint >= kMaxHint || !FLAG_incremental_marking ||
FLAG_expose_gc || Serializer::enabled()) {
return IdleGlobalGC();
}
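
The kMaxHint refactoring leaves the heuristic itself unchanged: hints are clamped to [30, 1000] and divided by 10, so the step-size factor stays within [3, 100]; what is new is that a hint of kMaxHint or more after a context disposal also ages inline caches. The clamping, spelled out:

#include <algorithm>
#include <cassert>

static int SizeFactor(int hint) {
  const int kMaxHint = 1000;
  return std::min(std::max(hint, 30), kMaxHint) / 10;
}

int main() {
  assert(SizeFactor(0) == 3);      // small hints are raised to the floor
  assert(SizeFactor(500) == 50);   // mid-range hints scale linearly
  assert(SizeFactor(5000) == 100); // large hints saturate at kMaxHint
  return 0;
}
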
diff --git a/src/heap.h b/src/heap.h
index 672b490..2bd037f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1480,6 +1480,13 @@
void ClearNormalizedMapCaches();
+ // Clears the cache of ICs related to this map.
+ void ClearCacheOnMap(Map* map) {
+ if (FLAG_cleanup_code_caches_at_gc) {
+ map->ClearCodeCache(this);
+ }
+ }
+
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
@@ -1585,6 +1592,16 @@
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
+ // Global inline caching age: it is incremented on some GCs after context
+ // disposal. We use it to flush inline caches.
+ int global_ic_age() {
+ return global_ic_age_;
+ }
+
+ void AgeInlineCaches() {
+ ++global_ic_age_;
+ }
+
private:
Heap();
@@ -1612,6 +1629,8 @@
// For keeping track of context disposals.
int contexts_disposed_;
+ int global_ic_age_;
+
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index f698da4..3231704 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -599,6 +599,9 @@
SetBlock(block);
previous->next_ = this;
if (next != NULL) next->previous_ = this;
+ if (block->last() == previous) {
+ block->set_last(this);
+ }
}
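
The added check keeps the owning block's last() pointer in sync when an instruction is appended at the tail, which is what lets HBasicBlock::AddInstruction drop its manual bookkeeping in the hydrogen.cc hunk below. A minimal doubly-linked-list model of the fix:

#include <cassert>
#include <cstddef>

struct Node { Node* prev = nullptr; Node* next = nullptr; };
struct List { Node* first = nullptr; Node* last = nullptr; };

static void InsertAfter(List* list, Node* node, Node* previous) {
  node->prev = previous;
  node->next = previous->next;
  previous->next = node;
  if (node->next != nullptr) node->next->prev = node;
  if (list->last == previous) list->last = node;  // the check this patch adds
}

int main() {
  Node a, b;
  List list{&a, &a};
  InsertAfter(&list, &b, &a);
  assert(list.last == &b);  // tail pointer follows the insertion
  return 0;
}
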
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fb5879f..93e14be 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1353,12 +1353,15 @@
int arguments_count,
FunctionLiteral* function,
CallKind call_kind,
- bool is_construct)
+ bool is_construct,
+ Variable* arguments)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind),
- is_construct_(is_construct) {
+ is_construct_(is_construct),
+ arguments_(arguments),
+ materializes_arguments_(false) {
}
virtual void PrintDataTo(StringStream* stream);
@@ -1373,6 +1376,13 @@
return Representation::None();
}
+ bool materializes_arguments() { return materializes_arguments_; }
+ void set_materializes_arguments(bool materializes_arguments) {
+ materializes_arguments_ = materializes_arguments;
+ }
+
+ Variable* arguments() { return arguments_; }
+
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
private:
@@ -1381,18 +1391,28 @@
FunctionLiteral* function_;
CallKind call_kind_;
bool is_construct_;
+ Variable* arguments_;
+ bool materializes_arguments_;
};
class HLeaveInlined: public HTemplateInstruction<0> {
public:
- HLeaveInlined() {}
+ explicit HLeaveInlined(bool arguments_pushed)
+ : arguments_pushed_(arguments_pushed) { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ bool arguments_pushed() {
+ return arguments_pushed_;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
+
+ private:
+ bool arguments_pushed_;
};
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 465bf44..838ff32 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -113,7 +113,6 @@
first_ = last_ = entry;
}
instr->InsertAfter(last_);
- last_ = instr;
}
@@ -165,11 +164,15 @@
}
-void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
+void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
+ bool drop_extra = state != NULL && state->drop_extra();
+ bool arguments_pushed = state != NULL && state->arguments_pushed();
+
if (block->IsInlineReturnTarget()) {
- AddInstruction(new(zone()) HLeaveInlined);
+ AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
last_environment_ = last_environment()->DiscardInlined(drop_extra);
}
+
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(block);
Finish(instr);
@@ -178,10 +181,13 @@
void HBasicBlock::AddLeaveInlined(HValue* return_value,
HBasicBlock* target,
- bool drop_extra) {
+ FunctionState* state) {
+ bool drop_extra = state != NULL && state->drop_extra();
+ bool arguments_pushed = state != NULL && state->arguments_pushed();
+
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
- AddInstruction(new(zone()) HLeaveInlined);
+ AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
last_environment_ = last_environment()->DiscardInlined(drop_extra);
last_environment()->Push(return_value);
AddSimulate(AstNode::kNoNumber);
@@ -2178,6 +2184,8 @@
return_handling_(return_handling),
function_return_(NULL),
test_context_(NULL),
+ entry_(NULL),
+ arguments_elements_(NULL),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -2337,8 +2345,8 @@
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
owner()->current_block()->Finish(instr);
- empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
- empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
+ empty_true->Goto(if_true(), owner()->function_state());
+ empty_false->Goto(if_false(), owner()->function_state());
owner()->set_current_block(NULL);
}
@@ -2359,8 +2367,8 @@
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
- empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
- empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
+ empty_true->Goto(if_true(), owner()->function_state());
+ empty_false->Goto(if_false(), owner()->function_state());
builder->set_current_block(NULL);
}
@@ -2851,10 +2859,10 @@
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(test->if_true(), function_state()->drop_extra());
+ current_block()->Goto(test->if_true(), function_state());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), function_state()->drop_extra());
+ current_block()->Goto(function_return(), function_state());
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
@@ -2871,10 +2879,10 @@
current_block()->Finish(typecheck);
if_spec_object->AddLeaveInlined(return_value,
function_return(),
- function_state()->drop_extra());
+ function_state());
not_spec_object->AddLeaveInlined(receiver,
function_return(),
- function_state()->drop_extra());
+ function_state());
}
} else {
// Return from an inlined function, visit the subexpression in the
@@ -2886,14 +2894,14 @@
test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), function_state()->drop_extra());
+ current_block()->Goto(function_return(), function_state());
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
current_block()->AddLeaveInlined(return_value,
function_return(),
- function_state()->drop_extra());
+ function_state());
}
}
set_current_block(NULL);
@@ -3675,22 +3683,27 @@
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
- if (!boilerplate->HasFastElements()) return false;
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value = JSObject::GetElement(boilerplate, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object,
- max_depth - 1,
- max_properties,
- total_size)) {
- return false;
+ if (boilerplate->HasFastDoubleElements()) {
+ *total_size += FixedDoubleArray::SizeFor(elements->length());
+ } else if (boilerplate->HasFastElements()) {
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value = JSObject::GetElement(boilerplate, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteral(value_object,
+ max_depth - 1,
+ max_properties,
+ total_size)) {
+ return false;
+ }
}
}
+ *total_size += FixedArray::SizeFor(length);
+ } else {
+ return false;
}
- *total_size += FixedArray::SizeFor(length);
}
Handle<FixedArray> properties(boilerplate->properties());
@@ -4932,31 +4945,70 @@
return false;
}
- // Our implementation of arguments (based on this stack frame or an
- // adapter below it) does not work for inlined functions.
if (function_state()->outer() != NULL) {
- Bailout("arguments access in inlined function");
- return true;
+ // Push arguments when entering inlined function.
+ if (!function_state()->arguments_pushed()) {
+ HEnvironment* arguments_env = environment()->arguments_environment();
+
+ HInstruction* insert_after = function_state()->entry();
+ ASSERT(insert_after->IsEnterInlined());
+ HEnterInlined::cast(insert_after)->set_materializes_arguments(true);
+
+ for (int i = 0; i < arguments_env->parameter_count(); i++) {
+ HValue* argument = arguments_env->Lookup(i);
+ HInstruction* push_argument = new(zone()) HPushArgument(argument);
+ push_argument->InsertAfter(insert_after);
+ insert_after = push_argument;
+ }
+
+ HInstruction* arguments_elements = new(zone()) HArgumentsElements();
+ arguments_elements->ClearFlag(HValue::kUseGVN);
+ arguments_elements->InsertAfter(insert_after);
+ function_state()->set_arguments_elements(arguments_elements);
+ }
}
HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
if (!name->IsEqualTo(CStrVector("length"))) return false;
- HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
- result = new(zone()) HArgumentsLength(elements);
+
+ if (function_state()->outer() == NULL) {
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ result = new(zone()) HArgumentsLength(elements);
+ } else {
+ // Number of arguments without receiver.
+ int argument_count = environment()->
+ arguments_environment()->parameter_count() - 1;
+ result = new(zone()) HConstant(
+ Handle<Object>(Smi::FromInt(argument_count)),
+ Representation::Integer32());
+ }
} else {
Push(graph()->GetArgumentsObject());
VisitForValue(expr->key());
if (HasStackOverflow() || current_block() == NULL) return true;
HValue* key = Pop();
Drop(1); // Arguments object.
- HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
- HInstruction* length = AddInstruction(
- new(zone()) HArgumentsLength(elements));
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ if (function_state()->outer() == NULL) {
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ HInstruction* length = AddInstruction(
+ new(zone()) HArgumentsLength(elements));
+ HInstruction* checked_key =
+ AddInstruction(new(zone()) HBoundsCheck(key, length));
+ result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ } else {
+ // Number of arguments without receiver.
+ HInstruction* elements = function_state()->arguments_elements();
+ int argument_count = environment()->
+ arguments_environment()->parameter_count() - 1;
+ HInstruction* length = AddInstruction(new(zone()) HConstant(
+ Handle<Object>(Smi::FromInt(argument_count)),
+ Representation::Integer32()));
+ HInstruction* checked_key =
+ AddInstruction(new(zone()) HBoundsCheck(key, length));
+ result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ }
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -5364,19 +5416,24 @@
AddInstruction(context);
inner_env->BindContext(context);
#endif
- AddSimulate(return_id);
- current_block()->UpdateEnvironment(inner_env);
- AddInstruction(new(zone()) HEnterInlined(target,
- arguments->length(),
- function,
- call_kind,
- function_state()->is_construct()));
// If the function uses arguments object create and bind one.
if (function->scope()->arguments() != NULL) {
ASSERT(function->scope()->arguments()->IsStackAllocated());
- environment()->Bind(function->scope()->arguments(),
- graph()->GetArgumentsObject());
+ inner_env->Bind(function->scope()->arguments(),
+ graph()->GetArgumentsObject());
}
+
+ AddSimulate(return_id);
+ current_block()->UpdateEnvironment(inner_env);
+
+ HInstruction* enter_inlined =
+ AddInstruction(new(zone()) HEnterInlined(target,
+ arguments->length(),
+ function,
+ call_kind,
+ function_state()->is_construct(),
+ function->scope()->arguments()));
+ function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
if (HasStackOverflow()) {
@@ -5405,17 +5462,17 @@
: undefined;
current_block()->AddLeaveInlined(return_value,
function_return(),
- function_state()->drop_extra());
+ function_state());
} else if (call_context()->IsEffect()) {
ASSERT(function_return() != NULL);
- current_block()->Goto(function_return(), function_state()->drop_extra());
+ current_block()->Goto(function_return(), function_state());
} else {
ASSERT(call_context()->IsTest());
ASSERT(inlined_test_context() != NULL);
HBasicBlock* target = function_state()->is_construct()
? inlined_test_context()->if_true()
: inlined_test_context()->if_false();
- current_block()->Goto(target, function_state()->drop_extra());
+ current_block()->Goto(target, function_state());
}
}
@@ -5433,12 +5490,12 @@
if (if_true->HasPredecessor()) {
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target, function_state()->drop_extra());
+ if_true->Goto(true_target, function_state());
}
if (if_false->HasPredecessor()) {
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target, function_state()->drop_extra());
+ if_false->Goto(false_target, function_state());
}
set_current_block(NULL);
return true;
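
For inlined frames there is no arguments adaptor frame to walk, so the new code above answers arguments.length from compile-time knowledge: the inlined environment's parameter count minus one for the receiver. Spelled out:

#include <cassert>

static int InlinedArgumentsLength(int parameter_count_with_receiver) {
  return parameter_count_with_receiver - 1;  // the receiver is not an argument
}

int main() {
  // f(a, b) inlined: the environment holds the receiver plus two parameters.
  assert(InlinedArgumentsLength(3) == 2);
  return 0;
}
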
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e2779bb..82e3119 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -42,6 +42,7 @@
// Forward declarations.
class BitVector;
+class FunctionState;
class HEnvironment;
class HGraph;
class HLoopInformation;
@@ -121,7 +122,7 @@
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block, bool drop_extra = false);
+ void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@@ -136,7 +137,7 @@
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
HBasicBlock* target,
- bool drop_extra = false);
+ FunctionState* state = NULL);
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
@@ -715,6 +716,16 @@
FunctionState* outer() { return outer_; }
+ HInstruction* entry() { return entry_; }
+ void set_entry(HInstruction* entry) { entry_ = entry; }
+
+ HInstruction* arguments_elements() { return arguments_elements_; }
+ void set_arguments_elements(HInstruction* arguments_elements) {
+ arguments_elements_ = arguments_elements;
+ }
+
+ bool arguments_pushed() { return arguments_elements() != NULL; }
+
private:
HGraphBuilder* owner_;
@@ -741,6 +752,12 @@
// return blocks. NULL in all other cases.
TestContext* test_context_;
+  // When inlining, the HEnterInlined instruction corresponding to the
+  // function entry.
+ HInstruction* entry_;
+
+ HInstruction* arguments_elements_;
+
FunctionState* outer_;
};
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index ef10922..3cf0d00 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -88,7 +88,7 @@
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return Assembler::kSpecialTargetSize;
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 5a08a2c..929b485 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -598,8 +598,8 @@
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@@ -610,8 +610,7 @@
set_target_address_at(instruction_payload, target);
}
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
+ static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 0f26d6f..62a2c2a 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -100,7 +101,9 @@
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
return 13;
}
@@ -321,12 +324,20 @@
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 100;
+
+
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -337,7 +348,8 @@
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(127, Max(1, distance / 100));
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -398,7 +410,8 @@
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
- weight = Min(127, Max(1, distance / 100));
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
Label ok;
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 35a48b1..2c5a167 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2543,25 +2543,29 @@
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
- // Check for arguments adapter frame.
- Label done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
+ if (instr->from_inlined()) {
+ __ lea(result, Operand(esp, -2 * kPointerSize));
+ } else {
+ // Check for arguments adapter frame.
+ Label done, adapted;
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(result),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted, Label::kNear);
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done, Label::kNear);
+ // No arguments adaptor frame.
+ __ mov(result, Operand(ebp));
+ __ jmp(&done, Label::kNear);
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
+ }
}
@@ -2683,6 +2687,11 @@
}
+void LCodeGen::DoPop(LPop* instr) {
+ __ Drop(instr->count());
+}
+
+
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -4461,33 +4470,47 @@
}
}
- // Copy elements backing store header.
- ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
+ // Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ mov(ecx, FieldOperand(source, i));
__ mov(FieldOperand(result, elements_offset + i), ecx);
}
- }
- // Copy elements backing store content.
- ASSERT(!has_elements || elements->IsFixedArray());
- int elements_length = has_elements ? elements->length() : 0;
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
+ // Copy elements backing store content.
+ int elements_length = elements->length();
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ int32_t value_low = value & 0xFFFFFFFF;
+ int32_t value_high = value >> 32;
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(FieldOperand(result, total_offset), Immediate(value_low));
+ __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(ecx, Operand(result, *offset));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ } else {
+ __ mov(FieldOperand(result, total_offset), Immediate(value));
+ }
+ }
} else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
+ UNREACHABLE();
}
}
}
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 2bfbb67..10131d4 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1083,7 +1083,8 @@
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements(
+ current_block_->last_environment()->outer() != NULL));
}
@@ -2380,6 +2381,9 @@
undefined,
instr->call_kind(),
instr->is_construct());
+ if (instr->materializes_arguments()) {
+ inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+ }
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2387,10 +2391,20 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (instr->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LPop(argument_count);
+ argument_count_ -= argument_count;
+ }
+
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
- return NULL;
+ return pop;
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 4ecce96..3fa21f9 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -174,7 +174,8 @@
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField) \
- V(WrapReceiver)
+ V(WrapReceiver) \
+ V(Pop)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -525,9 +526,15 @@
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
- LArgumentsElements() { }
+ explicit LArgumentsElements(bool from_inlined)
+ : from_inlined_(from_inlined) { }
+
+ bool from_inlined() const { return from_inlined_; }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+
+ private:
+ bool from_inlined_;
};
@@ -1401,6 +1408,19 @@
};
+class LPop: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LPop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Pop, "pop")
+
+ private:
+ int count_;
+};
+
+
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
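
The LPop instruction added here is the cleanup half of the inlined-arguments support: when an inlined function materializes an arguments object (instr->materializes_arguments() above), DoLeaveInlined emits an LPop for the pushed argument count, and each back end lowers it to a bare stack-pointer adjustment via __ Drop(count). A toy model of the effect (plain C++ for illustration, not V8 code):

    #include <cstdio>
    #include <vector>

    // Dropping n slots only moves the stack pointer; nothing is read back.
    // On ia32, __ Drop(count) amounts to "add esp, count * kPointerSize".
    struct ToyStack {
      std::vector<int> slots;
      void Push(int v) { slots.push_back(v); }
      void Drop(int count) { slots.resize(slots.size() - count); }
    };

    int main() {
      ToyStack stack;
      for (int i = 0; i < 3; ++i) stack.Push(i);  // arguments pushed on entry
      stack.Drop(3);                              // LPop(3) on leaving inlined
      std::printf("slots remaining: %zu\n", stack.slots.size());
      return 0;
    }
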
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index c4f166b..8fe89b4 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -178,7 +178,12 @@
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+ && (target->ic_age() != heap_->global_ic_age())) {
+ IC::Clear(rinfo->pc());
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ }
heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
MarkObject(target);
}
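
The new guard mirrors the mark-compact change further down: an inline cache stub whose recorded ic_age no longer matches the heap's global_ic_age is treated as stale and cleared during marking instead of being kept alive. The predicate in isolation (plain C++ for illustration, not V8 code):

    #include <cassert>

    struct ToyCode {
      bool is_inline_cache_stub;
      int ic_age;  // stamped from global_ic_age when the code was created
    };

    bool ShouldClearDuringMarking(const ToyCode& target,
                                  int global_ic_age,
                                  bool cleanup_code_caches_at_gc) {
      return cleanup_code_caches_at_gc &&
             target.is_inline_cache_stub &&
             target.ic_age != global_ic_age;
    }

    int main() {
      ToyCode stale = {true, 1};
      assert(ShouldClearDuringMarking(stale, 2, true));   // aged out: clear
      assert(!ShouldClearDuringMarking(stale, 1, true));  // current: keep
      return 0;
    }
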
@@ -795,6 +800,12 @@
Map* map = obj->map();
if (map == filler_map) continue;
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
+ heap_->ClearCacheOnMap(map);
+ }
+
+
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 0a2c174..9fb16fb 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -49,6 +49,16 @@
}
+bool Isolate::IsDebuggerActive() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (!NoBarrier_Load(&debugger_initialized_)) return false;
+ return debugger()->IsDebuggerActive();
+#else
+ return false;
+#endif
+}
+
+
bool Isolate::DebuggerHasBreakPoints() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return debug()->has_break_points();
diff --git a/src/isolate.h b/src/isolate.h
index ea94b8d..0c5a54c 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -929,6 +929,7 @@
}
#endif
+ inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
#ifdef DEBUG
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index c9ed66f..43f6b89 100644
--- a/src/mark-compact-inl.h
+++ b/src/mark-compact-inl.h
@@ -52,13 +52,6 @@
}
-void MarkCompactCollector::ClearCacheOnMap(Map* map) {
- if (FLAG_cleanup_code_caches_at_gc) {
- map->ClearCodeCache(heap());
- }
-}
-
-
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
@@ -88,7 +81,7 @@
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
if (obj->IsMap()) {
- ClearCacheOnMap(Map::cast(obj));
+ heap_->ClearCacheOnMap(Map::cast(obj));
}
}
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index b956e73..dde172d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1049,7 +1049,8 @@
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
&& (target->ic_state() == MEGAMORPHIC ||
- heap->mark_compact_collector()->flush_monomorphic_ics_)) {
+ heap->mark_compact_collector()->flush_monomorphic_ics_ ||
+ target->ic_age() != heap->global_ic_age())) {
IC::Clear(rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
@@ -1797,7 +1798,7 @@
ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
- ClearCacheOnMap(map);
+ heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 442ad1d..66ffd19 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -638,9 +638,6 @@
// Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
- // Clears the cache of ICs related to this map.
- INLINE(void ClearCacheOnMap(Map* map));
-
void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index f9e75fa..2ff4710 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -117,13 +117,31 @@
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(pc_);
+ ASSERT(IsCodeTarget(rmode_) ||
+ rmode_ == RUNTIME_ENTRY ||
+ rmode_ == EMBEDDED_OBJECT ||
+ rmode_ == EXTERNAL_REFERENCE);
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+ // that is the address of the instruction that follows the J/JAL/JR/JALR
+ // instruction.
+ return reinterpret_cast<Address>(
+ pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return Assembler::kSpecialTargetSize;
}
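
Because the 32-bit target is spread across the lui/ori pair, there is no single target word for the serializer to step over, which is why kSpecialTargetSize below becomes 0 while target_address_address() reports the end of the patchable sequence. The pointer arithmetic spelled out (plain C++ for illustration, not V8 code; it assumes the two-instruction lui/ori encoding described in the comment above):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kInstructionsFor32BitConstant = 2;  // lui + ori
      const int kInstrSize = 4;                     // bytes per instruction
      const int kSpecialTargetSize = 0;             // nothing to step over

      uintptr_t pc = 0x1000;  // address of the lui instruction
      uintptr_t target_address_address =
          pc + kInstructionsFor32BitConstant * kInstrSize;

      // The deserializer copies raw bytes up to this address, then patches.
      std::printf("end of patchable sequence: %#lx (step %d)\n",
                  static_cast<unsigned long>(target_address_address),
                  kSpecialTargetSize);
      return 0;
    }
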
@@ -281,7 +299,7 @@
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -307,7 +325,7 @@
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index d039539..0d7f921 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -850,7 +850,6 @@
return rmode != RelocInfo::NONE;
}
-
void Assembler::GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index b1ffc45..8b877f6 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -553,10 +553,13 @@
static void JumpLabelToJumpRegister(Address pc);
// This sets the branch destination (which gets loaded at the call address).
- // This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
+ // This is for calls and branches within generated code. The serializer
+ // has already deserialized the lui/ori instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
+ set_target_address_at(
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ target);
}
// This sets the branch destination.
@@ -578,8 +581,7 @@
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
- static const int kCallTargetSize = 0 * kInstrSize;
- static const int kExternalTargetSize = 0 * kInstrSize;
+ static const int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store a 32-bit constant.
// Before jump-optimizations, this constant was used in
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 60fc72f..eeb84c3 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1097,8 +1097,6 @@
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(a1, a2);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 03755b2..1b3242c 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -3942,13 +3942,16 @@
// Special handling of out of memory exceptions.
Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(USE_DELAY_SLOT, throw_out_of_memory_exception, eq,
- v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ Branch(USE_DELAY_SLOT,
+ throw_out_of_memory_exception,
+ eq,
+ v0,
+ Operand(reinterpret_cast<int32_t>(out_of_memory)));
// If we throw the OOM exception, the value of a3 doesn't matter.
// Any instruction can be in the delay slot that's not a jump.
// Retrieve the pending exception and clear the variable.
- __ li(a3, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
@@ -3956,8 +3959,8 @@
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ Branch(throw_termination_exception, eq,
- v0, Operand(isolate->factory()->termination_exception()));
+ __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
+ __ Branch(throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
__ jmp(throw_normal_exception);
@@ -4084,6 +4087,7 @@
offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
}
+ __ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
// We build an EntryFrame.
@@ -4156,7 +4160,7 @@
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ li(t1, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(t1, MemOperand(t0));
@@ -4200,7 +4204,9 @@
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(t1);
- __ Branch(&non_outermost_js_2, ne, t1,
+ __ Branch(&non_outermost_js_2,
+ ne,
+ t1,
Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ sw(zero_reg, MemOperand(t1));
@@ -4365,8 +4371,10 @@
__ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
// Null is not instance of anything.
- __ Branch(&object_not_null, ne, scratch,
- Operand(masm->isolate()->factory()->null_value()));
+ __ Branch(&object_not_null,
+ ne,
+ scratch,
+ Operand(masm->isolate()->factory()->null_value()));
__ li(v0, Operand(Smi::FromInt(1)));
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -4471,8 +4479,10 @@
Label runtime;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne,
- a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Branch(&runtime,
+ ne,
+ a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -4504,7 +4514,9 @@
Label adaptor_frame, try_allocate;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a2,
+ __ Branch(&adaptor_frame,
+ eq,
+ a2,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
@@ -5115,14 +5127,11 @@
// Check the result.
Label success;
- __ Branch(&success, eq,
- v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Label failure;
- __ Branch(&failure, eq,
- v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
- __ Branch(&runtime, ne,
- v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// Result must now be exception. If there is no pending exception already, a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// the exception has not been created yet. Handle that in the runtime system.
@@ -5875,10 +5884,8 @@
__ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
__ Or(c1, c1, scratch1);
__ bind(&tmp);
- __ Branch(not_found,
- Uless_equal,
- scratch,
- Operand(static_cast<int>('9' - '0')));
+ __ Branch(
+ not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
__ bind(&not_array_index);
// Calculate the two character string hash.
@@ -6548,8 +6555,7 @@
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2,
- Operand(ConsString::kMinLength));
+ __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
@@ -7076,8 +7082,10 @@
// Push return address (accessible to GC through exit frame pc).
// This spot for ra was reserved in EnterExitFrame.
masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
- masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET), true);
+ masm->li(ra,
+ Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ CONSTANT_SIZE);
// Call the function.
masm->Jump(t9);
// Make sure the stored 'ra' points to this position.
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 0648eaf..3df2fc6 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -120,7 +120,7 @@
int FullCodeGenerator::self_optimization_header_size() {
- return 11 * Instruction::kInstrSize;
+ return 10 * Instruction::kInstrSize;
}
@@ -2613,7 +2613,7 @@
Label entry, loop;
// The use of t2 to store the valueOf symbol assumes that it is not otherwise
// used in the loop below.
- __ li(t2, Operand(FACTORY->value_of_symbol()));
+ __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 7760877..2c4da1a 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -758,8 +758,6 @@
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@@ -773,10 +771,12 @@
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
+ __ CheckMap(scratch1,
+ scratch2,
+ Heap::kNonStrictArgumentsElementsMapRootIndex,
+ slow_case,
+ DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -820,8 +820,10 @@
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ __ CheckMap(backing_store,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ slow_case,
DONT_DO_SMI_CHECK);
__ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
@@ -1253,8 +1255,9 @@
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&check_if_double_array, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ Branch(
+ &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
+
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ Addu(t0, key, Operand(Smi::FromInt(1)));
@@ -1262,8 +1265,7 @@
__ Branch(&fast_object_without_map_check);
__ bind(&check_if_double_array);
- __ Branch(&slow, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
// Add 1 to key, and go to common element store code for doubles.
STATIC_ASSERT(kSmiTag == 0);
__ Addu(t0, key, Operand(Smi::FromInt(1)));
@@ -1285,8 +1287,10 @@
Register scratch_value = t0;
Register address = t1;
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&fast_double_with_map_check, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ Branch(&fast_double_with_map_check,
+ ne,
+ elements_map,
+ Heap::kFixedArrayMapRootIndex);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
@@ -1323,8 +1327,7 @@
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
- __ Branch(&slow, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 909422f..f6f0c94 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2077,7 +2077,7 @@
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
- __ li(result, Operand(factory()->the_hole_value()), true);
+ __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
__ Branch(&done);
// The inlined call site cache did not match. Check null and string before
@@ -2132,7 +2132,7 @@
__ bind(&before_push_delta);
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ li(temp, Operand(delta * kPointerSize), true);
+ __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
CallCodeGeneric(stub.GetCode(),
@@ -2651,16 +2651,20 @@
Register temp = scratch1();
Register result = ToRegister(instr->result());
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
- __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ if (instr->from_inlined()) {
+ __ Subu(result, sp, 2 * kPointerSize);
+ } else {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
- __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
+ __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ }
}
@@ -2793,6 +2797,11 @@
}
+void LCodeGen::DoPop(LPop* instr) {
+ __ Drop(instr->count());
+}
+
+
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 1e0c216..dca90a1 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1076,7 +1076,8 @@
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements(
+ current_block_->last_environment()->outer() != NULL));
}
@@ -2277,6 +2278,9 @@
undefined,
instr->call_kind(),
instr->is_construct());
+ if (instr->materializes_arguments()) {
+ inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+ }
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2284,10 +2288,21 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (instr->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LPop(argument_count);
+ argument_count_ -= argument_count;
+ }
+
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
- return NULL;
+
+ return pop;
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 5a7bf4d..8fb80b6 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -179,7 +179,8 @@
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField) \
- V(WrapReceiver)
+ V(WrapReceiver) \
+ V(Pop)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -533,9 +534,15 @@
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
- LArgumentsElements() { }
+ explicit LArgumentsElements(bool from_inlined)
+ : from_inlined_(from_inlined) { }
+
+ bool from_inlined() const { return from_inlined_; }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+
+ private:
+ bool from_inlined_;
};
@@ -1358,6 +1365,19 @@
};
+class LPop: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LPop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Pop, "pop")
+
+ private:
+ int count_;
+};
+
+
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 4578014..e93a417 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -768,13 +768,12 @@
}
}
-
//------------Pseudo-instructions-------------
-void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
ASSERT(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && !gen2instr) {
+ if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
@@ -786,7 +785,7 @@
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
}
- } else if (MustUseReg(j.rmode_) || gen2instr) {
+ } else {
if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
@@ -1647,6 +1646,16 @@
}
+void MacroAssembler::Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot) {
+ LoadRoot(at, index);
+ Branch(L, cond, rs, Operand(at), bdslot);
+}
+
+
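
This overload is what enables the root-index comparisons in code-stubs-mips.cc and ic-mips.cc earlier in this patch: instead of materializing a factory handle (which embeds a relocatable pointer and is hostile to snapshots), the comparand is loaded through the root register into at and compared there. The pattern, as it appears in the keyed-store IC hunk above:

    // Before: embeds the map as relocatable data in the instruction stream.
    __ Branch(&slow, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_double_array_map()));
    // After: fetch the same map via the roots array, then compare.
    __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
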
void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
b(offset);
@@ -2541,7 +2550,7 @@
// Must record previous source positions before the
// li() generates a new code target.
positions_recorder()->WriteRecordedPositions();
- li(t9, Operand(target_int, rmode), true);
+ li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
@@ -2752,7 +2761,7 @@
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- li(t1, Operand(CodeObject()));
+ li(t1, Operand(CodeObject()), CONSTANT_SIZE);
li(t2, Operand(state));
// Push the frame pointer, context, state, and code object.
@@ -3381,7 +3390,7 @@
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
- isolate()->factory()->heap_number_map(),
+ Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
@@ -4493,7 +4502,7 @@
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()));
+ li(t9, Operand(CodeObject()), CONSTANT_SIZE);
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
@@ -4537,7 +4546,8 @@
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
+ // Accessed from ExitFrame::code_slot.
+ li(t8, Operand(CodeObject()), CONSTANT_SIZE);
sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
@@ -5263,7 +5273,7 @@
FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
Label not_smi;
JumpIfNotSmi(descriptors, &not_smi);
- li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+ LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
bind(&not_smi);
}
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 71f99e6..f57418f 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -81,6 +81,16 @@
PROTECT
};
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+ // If the constant value can be represented in just 16 bits, then
+ // optimize the li to use a single instruction, rather than a lui/ori pair.
+ OPTIMIZE_SIZE = 0,
+ // Always use 2 instructions (lui/ori pair), even if the constant could
+ // be loaded with just one, so that this value is patchable later.
+ CONSTANT_SIZE = 1
+};
+
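
Replacing the old bool gen2instr with this enum makes the call sites self-documenting: CONSTANT_SIZE pins li to the full lui/ori pair so patching and size bookkeeping can rely on a fixed two-instruction footprint, while OPTIMIZE_SIZE lets small constants shrink to a single instruction. A simplified sketch of the trade-off (plain C++ for illustration, not V8 code; the real li has further single-instruction forms):

    #include <cassert>
    #include <cstdint>

    enum LiFlags { OPTIMIZE_SIZE = 0, CONSTANT_SIZE = 1 };

    bool IsInt16(int32_t value) { return value >= -32768 && value <= 32767; }

    int InstructionsForLi(int32_t value, LiFlags mode) {
      if (mode == CONSTANT_SIZE) return 2;  // always lui/ori: patchable
      return IsInt16(value) ? 1 : 2;        // one addiu when the value fits
    }

    int main() {
      assert(InstructionsForLi(42, OPTIMIZE_SIZE) == 1);
      assert(InstructionsForLi(42, CONSTANT_SIZE) == 2);  // fixed footprint
      assert(InstructionsForLi(0x12345678, OPTIMIZE_SIZE) == 2);
      return 0;
    }
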
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
@@ -184,6 +194,12 @@
Ret(cond, rs, rt, bd);
}
+ void Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot = PROTECT);
+
#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
@@ -248,7 +264,6 @@
Branch(L);
}
-
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -579,12 +594,13 @@
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
// Load int32 in the rd register.
- void li(Register rd, Operand j, bool gen2instr = false);
- inline void li(Register rd, int32_t j, bool gen2instr = false) {
- li(rd, Operand(j), gen2instr);
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
}
- inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
- li(dst, Operand(value), gen2instr);
+ inline void li(Register dst, Handle<Object> value,
+ LiFlags mode = OPTIMIZE_SIZE) {
+ li(dst, Operand(value), mode);
}
// Push multiple registers on the stack.
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index f171b6f..ae4da93 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -386,7 +386,7 @@
// Restore regexp engine registers.
__ MultiPop(regexp_registers_to_retain);
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Check if function returned non-zero for success or zero for failure.
@@ -698,7 +698,7 @@
// Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
// Load previous char as initial value of current character register.
Label at_start;
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
@@ -783,7 +783,7 @@
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
SafeReturn();
}
@@ -813,7 +813,7 @@
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
// Restore saved registers and continue.
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
@@ -1010,7 +1010,7 @@
__ PrepareCallCFunction(num_arguments, scratch);
__ mov(a2, frame_pointer());
// Code* of self.
- __ li(a1, Operand(masm_->CodeObject()));
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// a0 becomes return address pointer.
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
@@ -1229,7 +1229,7 @@
if (OS::ActivationFrameAlignment() != 0) {
__ lw(sp, MemOperand(sp, 16));
}
- __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f27a436..78578cc 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -944,6 +944,12 @@
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INT64_FIELD(p, offset) \
+ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT64_FIELD(p, offset, value) \
+ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_SHORT_FIELD(p, offset) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
@@ -1701,6 +1707,12 @@
return result;
}
+int64_t FixedDoubleArray::get_representation(int index) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ ASSERT(index >= 0 && index < this->length());
+ return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
+}
MaybeObject* FixedDoubleArray::get(int index) {
if (is_the_hole(index)) {
@@ -4118,7 +4130,7 @@
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
-
+INT_ACCESSORS(Code, ic_age, kICAgeOffset)
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
@@ -4701,6 +4713,7 @@
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
+ // - IncrementalMarking::Step
ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
}
diff --git a/src/objects.cc b/src/objects.cc
index 9d20f6c..fa4f943 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -4324,21 +4324,20 @@
static bool UpdateGetterSetterInDictionary(
SeededNumberDictionary* dictionary,
uint32_t index,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
- // TODO(mstarzinger): We should check for details.IsDontDelete() here once
- // we only call into the runtime once to set both getter and setter.
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
+ ASSERT(!details.IsDontDelete());
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(entry,
PropertyDetails(attributes, CALLBACKS, index));
}
- AccessorPair::cast(result)->set(component, fun);
+ AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
}
}
@@ -4347,8 +4346,8 @@
MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
@@ -4369,8 +4368,8 @@
case DICTIONARY_ELEMENTS:
if (UpdateGetterSetterInDictionary(element_dictionary(),
index,
- component,
- fun,
+ getter,
+ setter,
attributes)) {
return GetHeap()->undefined_value();
}
@@ -4390,8 +4389,8 @@
SeededNumberDictionary::cast(arguments);
if (UpdateGetterSetterInDictionary(dictionary,
index,
- component,
- fun,
+ getter,
+ setter,
attributes)) {
return GetHeap()->undefined_value();
}
@@ -4405,23 +4404,22 @@
{ MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
if (!maybe_accessors->To(&accessors)) return maybe_accessors;
}
- accessors->set(component, fun);
+ accessors->SetComponents(getter, setter);
return SetElementCallback(index, accessors, attributes);
}
MaybeObject* JSObject::DefinePropertyAccessor(String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
// Lookup the name.
LookupResult result(GetHeap()->isolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsFound()) {
- // TODO(mstarzinger): We should check for result.IsDontDelete() here once
- // we only call into the runtime once to set both getter and setter.
if (result.type() == CALLBACKS) {
+ ASSERT(!result.IsDontDelete());
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
if (obj->IsAccessorPair()) {
@@ -4430,7 +4428,7 @@
AccessorPair::cast(obj)->CopyWithoutTransitions();
if (!maybe_copy->To(&copy)) return maybe_copy;
}
- copy->set(component, fun);
+ copy->SetComponents(getter, setter);
// Use set to update attributes.
return SetPropertyCallback(name, copy, attributes);
}
@@ -4441,7 +4439,7 @@
{ MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
if (!maybe_accessors->To(&accessors)) return maybe_accessors;
}
- accessors->set(component, fun);
+ accessors->SetComponents(getter, setter);
return SetPropertyCallback(name, accessors, attributes);
}
@@ -4512,12 +4510,6 @@
MaybeObject* JSObject::SetPropertyCallback(String* name,
Object* structure,
PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-
- bool convert_back_to_fast = HasFastProperties() &&
- (map()->instance_descriptors()->number_of_descriptors()
- < DescriptorArray::kMaxNumberOfDescriptors);
-
// Normalize object to make this operation simple.
{ MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (maybe_ok->IsFailure()) return maybe_ok;
@@ -4538,22 +4530,29 @@
}
// Update the dictionary with the new CALLBACKS property.
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
{ MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details);
if (maybe_ok->IsFailure()) return maybe_ok;
}
- if (convert_back_to_fast) {
- MaybeObject* maybe_ok = TransformToFastProperties(0);
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
return GetHeap()->undefined_value();
}
+
+void JSObject::DefineAccessor(Handle<JSObject> object,
+ Handle<String> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->DefineAccessor(*name, *getter, *setter, attributes));
+}
+
MaybeObject* JSObject::DefineAccessor(String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
- ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -4566,8 +4565,8 @@
Object* proto = GetPrototype();
if (proto->IsNull()) return this;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(name, component,
- fun, attributes);
+ return JSObject::cast(proto)->DefineAccessor(
+ name, getter, setter, attributes);
}
// Make sure that the top context does not change when doing callbacks or
@@ -4581,8 +4580,8 @@
uint32_t index = 0;
return name->AsArrayIndex(&index) ?
- DefineElementAccessor(index, component, fun, attributes) :
- DefinePropertyAccessor(name, component, fun, attributes);
+ DefineElementAccessor(index, getter, setter, attributes) :
+ DefinePropertyAccessor(name, getter, setter, attributes);
}
@@ -4696,7 +4695,7 @@
Object* element = dictionary->ValueAt(entry);
if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
element->IsAccessorPair()) {
- return AccessorPair::cast(element)->SafeGet(component);
+ return AccessorPair::cast(element)->GetComponent(component);
}
}
}
@@ -4712,7 +4711,7 @@
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->SafeGet(component);
+ return AccessorPair::cast(obj)->GetComponent(component);
}
}
}
@@ -5949,8 +5948,8 @@
}
-Object* AccessorPair::SafeGet(AccessorComponent component) {
- Object* accessor = get(component);
+Object* AccessorPair::GetComponent(AccessorComponent component) {
+ Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
}
@@ -7451,7 +7450,7 @@
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
- ASSERT(value->IsJSObject());
+ ASSERT(value->IsJSReceiver());
Heap* heap = GetHeap();
if (has_initial_map()) {
// If the function has allocated the initial map
@@ -7478,11 +7477,11 @@
ASSERT(should_have_prototype());
Object* construct_prototype = value;
- // If the value is not a JSObject, store the value in the map's
+ // If the value is not a JSReceiver, store the value in the map's
// constructor field so it can be accessed. Also, set the prototype
// used for constructing objects to the original object prototype.
// See ECMA-262 13.2.2.
- if (!value->IsJSObject()) {
+ if (!value->IsJSReceiver()) {
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
diff --git a/src/objects.h b/src/objects.h
index c7093a8..a9cb8e0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1627,9 +1627,14 @@
String* name,
bool continue_search);
+ static void DefineAccessor(Handle<JSObject> object,
+ Handle<String> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes);
Object* LookupAccessor(String* name, AccessorComponent component);
@@ -2178,13 +2183,13 @@
PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* DefineElementAccessor(
uint32_t index,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
String* name,
- AccessorComponent component,
- Object* fun,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes);
void LookupInDescriptor(String* name, LookupResult* result);
@@ -2340,6 +2345,7 @@
public:
// Setter and getter for elements.
inline double get_scalar(int index);
+ inline int64_t get_representation(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -4174,6 +4180,11 @@
// it is only used by the garbage collector itself.
DECL_ACCESSORS(gc_metadata, Object)
+ // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
+ // at the moment when this object was created.
+ inline void set_ic_age(int count);
+ inline int ic_age();
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline FixedArray* unchecked_deoptimization_data();
@@ -4426,8 +4437,9 @@
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
- static const int kFlagsOffset = kGCMetadataOffset + kPointerSize;
-
+ static const int kICAgeOffset =
+ kGCMetadataOffset + kPointerSize;
+ static const int kFlagsOffset = kICAgeOffset + kIntSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlagsSize = 2 * kIntSize;
@@ -8045,23 +8057,15 @@
MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
- Object* get(AccessorComponent component) {
- ASSERT(component == ACCESSOR_GETTER || component == ACCESSOR_SETTER);
- return (component == ACCESSOR_GETTER) ? getter() : setter();
- }
+ // Note: Returns undefined instead of the hole.
+ Object* GetComponent(AccessorComponent component);
- void set(AccessorComponent component, Object* value) {
- ASSERT(component == ACCESSOR_GETTER || component == ACCESSOR_SETTER);
- if (component == ACCESSOR_GETTER) {
- set_getter(value);
- } else {
- set_setter(value);
- }
+ // Set both components, skipping arguments which are a JavaScript null.
+ void SetComponents(Object* getter, Object* setter) {
+ if (!getter->IsNull()) set_getter(getter);
+ if (!setter->IsNull()) set_setter(setter);
}
- // Same as get, but returns undefined instead of the hole.
- Object* SafeGet(AccessorComponent component);
-
bool ContainsAccessor() {
return IsJSAccessor(getter()) || IsJSAccessor(setter());
}
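
Folding the getter and setter updates into a single SetComponents call is what lets the runtime install both sides of an accessor in one step (see the runtime.cc change below); passing a JavaScript null for one component leaves that component untouched. The null-skip semantics modeled standalone (plain C++ for illustration, not V8 code):

    #include <cassert>

    struct ToyAccessorPair {
      const void* getter = nullptr;  // nullptr stands in for "not set"
      const void* setter = nullptr;
      // Mirrors SetComponents: a JavaScript-null argument is skipped.
      void SetComponents(const void* g, const void* s) {
        if (g != nullptr) getter = g;
        if (s != nullptr) setter = s;
      }
    };

    int main() {
      int get_fn = 0, set_fn = 0;
      ToyAccessorPair pair;
      pair.SetComponents(&get_fn, nullptr);  // install the getter only
      pair.SetComponents(nullptr, &set_fn);  // later: setter only
      assert(pair.getter == &get_fn);        // getter survived the second call
      assert(pair.setter == &set_fn);
      return 0;
    }
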
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index de9bf40..cde6057 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -173,7 +173,7 @@
// prepared to generate it, but we don't expect to have to.
bool found_code = false;
Code* stack_check_code = NULL;
-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
@@ -290,7 +290,12 @@
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
+ } else if (ticks >= 100) {
+ // If this function does not have enough type info, but has
+ // seen a huge number of ticks, optimize it as it is.
+ Optimize(function, "not much type info but very hot");
} else {
+ function->shared()->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -344,7 +349,7 @@
void RuntimeProfiler::NotifyTick() {
-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
if (FLAG_count_based_interrupts) return;
#endif
isolate_->stack_guard()->RequestRuntimeProfilerTick();
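
The V8_TARGET_ARCH_ARM additions here and in runtime.cc extend the count-based interrupt paths to ARM, and the new branch above adds a tier to the optimization ladder: a function that stays hot long enough is optimized even without much type feedback. Roughly (plain C++ for illustration, not V8 code; the stable-tick threshold and the type-info test are placeholders, only the 100-tick value comes from the hunk above):

    #include <cassert>

    enum Decision { kOptimizeHotAndStable, kOptimizeVeryHot, kKeepWaiting };

    Decision Decide(int ticks, bool enough_type_info) {
      const int kTicksWhenStable = 32;       // placeholder threshold
      const int kTicksWhenNoTypeInfo = 100;  // from the hunk above
      if (enough_type_info && ticks >= kTicksWhenStable)
        return kOptimizeHotAndStable;  // "hot and stable"
      if (ticks >= kTicksWhenNoTypeInfo)
        return kOptimizeVeryHot;       // "not much type info but very hot"
      return kKeepWaiting;             // profiler_ticks keeps accumulating
    }

    int main() {
      assert(Decide(50, true) == kOptimizeHotAndStable);
      assert(Decide(120, false) == kOptimizeVeryHot);
      assert(Decide(50, false) == kKeepWaiting);
      return 0;
    }
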
diff --git a/src/runtime.cc b/src/runtime.cc
index 6e93e58..a1e1633 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1065,10 +1065,10 @@
AccessorPair::cast(dictionary->ValueAt(entry));
elms->set(IS_ACCESSOR_INDEX, heap->true_value());
if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->SafeGet(ACCESSOR_GETTER));
+ elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
}
if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->SafeGet(ACCESSOR_SETTER));
+ elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
}
break;
}
@@ -1115,10 +1115,10 @@
AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->SafeGet(ACCESSOR_GETTER));
+ elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
}
if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->SafeGet(ACCESSOR_SETTER));
+ elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
}
} else {
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -4350,26 +4350,9 @@
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- // TODO(svenpanne) Define getter/setter/attributes in a single step.
- if (getter->IsNull() && setter->IsNull()) {
- JSArray* array;
- { MaybeObject* maybe_array = GetOwnProperty(isolate, obj, name);
- if (!maybe_array->To(&array)) return maybe_array;
- }
- Object* current = FixedArray::cast(array->elements())->get(GETTER_INDEX);
- getter = Handle<Object>(current, isolate);
- }
- if (!getter->IsNull()) {
- MaybeObject* ok =
- obj->DefineAccessor(*name, ACCESSOR_GETTER, *getter, attr);
- if (ok->IsFailure()) return ok;
- }
- if (!setter->IsNull()) {
- MaybeObject* ok =
- obj->DefineAccessor(*name, ACCESSOR_SETTER, *setter, attr);
- if (ok->IsFailure()) return ok;
- }
-
+ bool fast = obj->HasFastProperties();
+ JSObject::DefineAccessor(obj, name, getter, setter, attr);
+ if (fast) JSObject::TransformToFastProperties(obj, 0);
return isolate->heap()->undefined_value();
}
@@ -8140,6 +8123,14 @@
ASSERT(*arguments != isolate->heap()->undefined_value());
}
frame->SetExpression(i, *arguments);
+ if (FLAG_trace_deopt) {
+ PrintF("Materializing arguments object for frame %p - %p: %p ",
+ reinterpret_cast<void*>(frame->sp()),
+ reinterpret_cast<void*>(frame->fp()),
+ reinterpret_cast<void*>(*arguments));
+ arguments->ShortPrint();
+ PrintF("\n");
+ }
}
}
}
@@ -8375,7 +8366,7 @@
PrintF("]\n");
}
Handle<Code> check_code;
-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
check_code = interrupt_stub.GetCode();
@@ -10223,8 +10214,8 @@
if (hasJavaScriptAccessors) {
AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
details->set(2, isolate->heap()->ToBoolean(caught_exception));
- details->set(3, accessors->SafeGet(ACCESSOR_GETTER));
- details->set(4, accessors->SafeGet(ACCESSOR_SETTER));
+ details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
+ details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
}
return *isolate->factory()->NewJSArrayWithElements(details);
diff --git a/src/serialize.cc b/src/serialize.cc
index 4249d36..01d5f1c 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -849,13 +849,12 @@
if (how == kFromCode) { \
Address location_of_branch_data = \
reinterpret_cast<Address>(current); \
- Assembler::set_target_at(location_of_branch_data, \
- reinterpret_cast<Address>(new_object)); \
- if (within == kFirstInstruction) { \
- location_of_branch_data += Assembler::kCallTargetSize; \
- current = reinterpret_cast<Object**>(location_of_branch_data); \
- current_was_incremented = true; \
- } \
+ Assembler::deserialization_set_special_target_at( \
+ location_of_branch_data, \
+ reinterpret_cast<Address>(new_object)); \
+ location_of_branch_data += Assembler::kSpecialTargetSize; \
+ current = reinterpret_cast<Object**>(location_of_branch_data); \
+ current_was_incremented = true; \
} else { \
*current = new_object; \
} \
@@ -991,6 +990,21 @@
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
+#if V8_TARGET_ARCH_MIPS
+ // Deserialize a new object from pointer found in code and write
+ // a pointer to it to the current object. Required only for MIPS, and
+ // omitted on the other architectures because it is fully unrolled and
+ // would cause bloat.
+ ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
+ // Find a recently deserialized code object using its offset from the
+ // current allocation point and write a pointer to it to the current
+ // object. Required only for MIPS.
+ ALL_SPACES(kBackref, kFromCode, kStartOfObject)
+ // Find an already deserialized code object using its offset from
+ // the start and write a pointer to it to the current object.
+ // Required only for MIPS.
+ ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
+#endif
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to its first instruction
// to the current code object or the instruction pointer in a function
@@ -1229,12 +1243,23 @@
}
-int Serializer::RootIndex(HeapObject* heap_object) {
+int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
Heap* heap = HEAP;
if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
- if (!root->IsSmi() && root == heap_object) return i;
+ if (!root->IsSmi() && root == heap_object) {
+#if V8_TARGET_ARCH_MIPS
+ if (from == kFromCode) {
+ // In order to avoid code bloat in the deserializer we don't have
+ // support for the encoding that specifies a particular root should
+ // be written into the lui/ori instructions on MIPS. Therefore we
+ // should not generate such serialization data for MIPS.
+ return kInvalidRootIndex;
+ }
+#endif
+ return i;
+ }
}
return kInvalidRootIndex;
}
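
Threading HowToCode down to the root check lets the serializer reject root shortcuts precisely where they would hurt: a root referenced from inside MIPS code (kFromCode) is serialized as an ordinary object, so the deserializer never needs a path that patches a root index into a lui/ori pair. The shape of the gate (plain C++ for illustration, not V8 code):

    enum HowToCode { kPlain, kFromCode };
    const int kInvalidRootIndex = -1;

    // Suppress the root-array shortcut for in-code references on MIPS.
    int GateRootIndex(int found_index, HowToCode from, bool target_is_mips) {
      if (target_is_mips && from == kFromCode) return kInvalidRootIndex;
      return found_index;
    }

    int main() {
      return GateRootIndex(3, kFromCode, true) == kInvalidRootIndex ? 0 : 1;
    }
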
@@ -1287,7 +1312,7 @@
HeapObject* heap_object = HeapObject::cast(o);
int root_index;
- if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
return;
}
@@ -1359,7 +1384,7 @@
}
int root_index;
- if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
return;
}
@@ -1439,7 +1464,7 @@
while (current < end && !(*current)->IsSmi()) {
HeapObject* current_contents = HeapObject::cast(*current);
- int root_index = serializer_->RootIndex(current_contents);
+ int root_index = serializer_->RootIndex(current_contents, kPlain);
// Repeats are not subject to the write barrier so there are only some
// objects that can be used in a repeat encoding. These are the early
// ones in the root array that are never in new space.
diff --git a/src/serialize.h b/src/serialize.h
index 02bf58a..f50e23e 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -485,7 +485,7 @@
protected:
static const int kInvalidRootIndex = -1;
- int RootIndex(HeapObject* heap_object);
+ int RootIndex(HeapObject* heap_object, HowToCode from);
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
intptr_t root_index_wave_front() { return root_index_wave_front_; }
void set_root_index_wave_front(intptr_t value) {
diff --git a/src/version.cc b/src/version.cc
index 0b075fa..3574a6d 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
-#define BUILD_NUMBER 23
+#define BUILD_NUMBER 24
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8e3caa4..a9cc2ef 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -235,9 +235,9 @@
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
- return Assembler::kCallTargetSize;
+ return Assembler::kSpecialTargetSize;
} else {
- return Assembler::kExternalTargetSize;
+ return kPointerSize;
}
}
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 5397cd5..2f0c542 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -3045,8 +3045,6 @@
return (1 << rmode_) & kApplyMask;
}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 3725c03..60b29e6 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -577,8 +577,8 @@
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@@ -591,8 +591,7 @@
inline Handle<Object> code_target_object_handle_at(Address pc);
// Number of bytes taken up by the branch target in the code.
- static const int kCallTargetSize = 4; // Use 32-bit displacement.
- static const int kExternalTargetSize = 8; // Use 64-bit absolute.
+ static const int kSpecialTargetSize = 4; // Use 32-bit displacement.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 4287c3c..0555aeb 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2497,24 +2497,28 @@
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
- // Check for arguments adapter frame.
- Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted, Label::kNear);
+ if (instr->from_inlined()) {
+ __ lea(result, Operand(rsp, -2 * kPointerSize));
+ } else {
+ // Check for arguments adapter frame.
+ Label done, adapted;
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted, Label::kNear);
- // No arguments adaptor frame.
- __ movq(result, rbp);
- __ jmp(&done, Label::kNear);
+ // No arguments adaptor frame.
+ __ movq(result, rbp);
+ __ jmp(&done, Label::kNear);
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
+ }
}
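
DoArgumentsElements now has two paths. Inside an inlined function the
arguments were pushed by the optimizing caller itself, so the base address
is synthesized directly from the stack pointer (rsp - 2 * kPointerSize,
presumably accounting for the two slots a real frame would interpose);
only the non-inlined path still probes for an arguments-adaptor frame. A
rough model of the choice in plain C++ (mirrors the emitted code, not the
macro-assembler calls):

    #include <cstdint>

    // Pick the base address from which the arguments are addressed.
    static intptr_t ArgumentsBase(bool from_inlined, intptr_t rsp,
                                  intptr_t rbp, bool caller_is_adaptor,
                                  intptr_t caller_fp) {
      const int kPointerSize = 8;
      if (from_inlined) return rsp - 2 * kPointerSize;  // synthesized frame
      // Current frame, or the real frame below an arguments adaptor.
      return caller_is_adaptor ? caller_fp : rbp;
    }
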
@@ -2640,6 +2644,11 @@
}
+void LCodeGen::DoPop(LPop* instr) {
+ __ Drop(instr->count());
+}
+
+
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
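
The new LPop is a bare stack adjustment: __ Drop(count) frees count
pointer-sized slots (on x64 this is ordinarily a single add to rsp). A
trivial stand-in for the semantics:

    #include <cstdint>

    // Hypothetical equivalent of LPop's effect on the stack pointer.
    static void Pop(int count, intptr_t* rsp) {
      *rsp += count * 8;  // discard `count` pointer-sized slots
    }
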
@@ -4229,34 +4238,46 @@
}
}
- // Copy elements backing store header.
- ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
+ // Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ movq(rcx, FieldOperand(source, i));
__ movq(FieldOperand(result, elements_offset + i), rcx);
}
- }
- // Copy elements backing store content.
- ASSERT(!has_elements || elements->IsFixedArray());
- int elements_length = has_elements ? elements->length() : 0;
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value = JSObject::GetElement(object, i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
+ // Copy elements backing store content.
+ int elements_length = elements->length();
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(rcx, Operand(result, *offset));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ } else {
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ }
} else {
- __ movq(rcx, value, RelocInfo::NONE);
- __ movq(FieldOperand(result, total_offset), rcx);
+ UNREACHABLE();
}
}
}
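
The boilerplate deep copy now dispatches on the backing-store kind instead
of asserting FixedArray up front. Double arrays are read through
get_representation(i), i.e. copied as raw 64-bit payloads, so hole NaN
patterns survive bit-for-bit; tagged arrays keep the previous per-value
logic (nested objects recurse through EmitDeepCopy, other heap objects and
immediates are stored directly); any other kind is UNREACHABLE. A compact
model of the dispatch, with illustrative types rather than V8's:

    #include <cstdint>
    #include <vector>

    struct Elements {
      bool is_double;
      std::vector<uint64_t> raw_bits;  // payload when is_double
      std::vector<void*> values;       // tagged values otherwise
    };

    // Doubles move as raw bits so holes/NaNs are preserved verbatim;
    // tagged elements are copied value by value (the real code recurses
    // into nested JSObjects).
    static void DeepCopyElements(const Elements& src, Elements* dst) {
      if (src.is_double) dst->raw_bits = src.raw_bits;
      else dst->values = src.values;
    }
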
@@ -4706,7 +4727,7 @@
__ movq(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(NegateCondition(cc), instr->environment());
+ DeoptimizeIf(cc, instr->environment());
}
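
This one-liner inverts the deoptimization sense: the old code bailed out
when the loaded field was not a smi (NegateCondition(cc)), while the new
code bails out when it is one, which suggests the fast path expects a heap
object at this index.
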
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index d3e4cdd..baff1bd 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1071,7 +1071,8 @@
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements(
+ current_block_->last_environment()->outer() != NULL));
}
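
The builder derives the new flag statically: a last_environment() whose
outer() is non-NULL belongs to an inlined frame, so the code generator
above knows at compile time whether the adaptor-frame probe can be
skipped.
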
@@ -2270,6 +2271,9 @@
undefined,
instr->call_kind(),
instr->is_construct());
+ if (instr->materializes_arguments()) {
+ inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+ }
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
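
When the inlined function is known to materialize its arguments object,
the freshly built inner environment binds the graph's singleton arguments
object up front, so arguments accesses inside the inlined body resolve
against it instead of a real frame.
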
@@ -2277,10 +2281,21 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (instr->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LPop(argument_count);
+ argument_count_ -= argument_count;
+ }
+
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
- return NULL;
+
+ return pop;
}
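
Leaving an inlined function is no longer a no-op: if the arguments were
pushed, the builder emits the LPop above for the parameter count of the
arguments environment and rebalances argument_count_, keeping the
simulated stack in sync with what DoArgumentsElements addressed via rsp.
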
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 2d8fd2e..b9e390e 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -179,7 +179,8 @@
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField) \
- V(WrapReceiver)
+ V(WrapReceiver) \
+ V(Pop)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -535,9 +536,15 @@
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
- LArgumentsElements() { }
+ explicit LArgumentsElements(bool from_inlined)
+ : from_inlined_(from_inlined) { }
+
+ bool from_inlined() const { return from_inlined_; }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+
+ private:
+ bool from_inlined_;
};
@@ -1358,6 +1365,19 @@
};
+class LPop: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LPop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Pop, "pop")
+
+ private:
+ int count_;
+};
+
+
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
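
LPop is declared as LTemplateInstruction<0, 0, 0>, which in the usual
lithium template convention means zero results, zero value operands and
zero temps; the instruction carries nothing but the immediate slot count
consumed by DoPop.
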
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 2de0afb..af28be1 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -84,10 +84,6 @@
##############################################################################
-[ $arch == mips ]
-test-serialize: SKIP
-
-##############################################################################
[ $arch == mips && $crankshaft ]
# Tests that time out with crankshaft.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 8df0b7b..b1a23c1 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -5870,6 +5870,7 @@
"p.push(String.fromCharCode(0xdc00));"
"var a = [];"
"var b = [];"
+ "var c = [];"
"var alens = [];"
"for (var i = 0; i < 3; i++) {"
" p[1] = String.fromCharCode(lead++);"
@@ -5877,17 +5878,21 @@
" p[2] = String.fromCharCode(trail++);"
" a.push(p[i] + p[j]);"
" b.push(p[i] + p[j]);"
+ " c.push(p[i] + p[j]);"
" alens.push(plens[i] + plens[j]);"
" }"
"}"
"alens[5] -= 2;" // Here the surrogate pairs match up.
"var a2 = [];"
"var b2 = [];"
+ "var c2 = [];"
"var a2lens = [];"
"for (var m = 0; m < 9; m++) {"
" for (var n = 0; n < 9; n++) {"
" a2.push(a[m] + a[n]);"
" b2.push(b[m] + b[n]);"
+ " var newc = 'x' + c[m] + c[n] + 'y';"
+ " c2.push(newc.substring(1, newc.length - 1));"
" var utf = alens[m] + alens[n];" // And here.
// The 'n's that start with 0xdc.. are 6-8
// The 'm's that end with 0xd8.. are 1, 4 and 7
@@ -5899,6 +5904,7 @@
Utf16Helper(context, "a2", "a2lens", 81);
WriteUtf8Helper(context, "b", "alens", 9);
WriteUtf8Helper(context, "b2", "a2lens", 81);
+ WriteUtf8Helper(context, "c2", "a2lens", 81);
}
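
The added c/c2 strings wrap each concatenation in 'x'...'y' and slice the
padding back off with substring, which presumably yields sliced strings,
so the UTF-8 writer is now also exercised on the sliced representation
across the same surrogate-pair boundaries and expected lengths. The API
pattern under test, sketched against the 3.9-era signature (str stands in
for a v8::Handle<v8::String> obtained from the test context):

    char buf[64];
    int nchars = 0;
    // WriteUtf8(buffer, capacity, &nchars) returns the number of bytes
    // written and reports how many characters were consumed.
    int bytes = str->WriteUtf8(buf, static_cast<int>(sizeof(buf)), &nchars);
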
diff --git a/test/mjsunit/compiler/inline-arguments.js b/test/mjsunit/compiler/inline-arguments.js
index b6adf7f..48f6019 100644
--- a/test/mjsunit/compiler/inline-arguments.js
+++ b/test/mjsunit/compiler/inline-arguments.js
@@ -113,3 +113,46 @@
%OptimizeFunctionOnNextCall(test_adaptation);
test_adaptation();
})();
+
+// Test arguments access from the inlined function.
+function uninlinable(v) {
+ assertEquals(0, v);
+ try { } catch (e) { }
+ return 0;
+}
+
+function toarr_inner() {
+ var a = arguments;
+ var marker = a[0];
+ uninlinable(uninlinable(0, 0), marker.x);
+
+ var r = new Array();
+ for (var i = a.length - 1; i >= 1; i--) {
+ r.push(a[i]);
+ }
+
+ return r;
+}
+
+function toarr1(marker, a, b, c) {
+ return toarr_inner(marker, a / 2, b / 2, c / 2);
+}
+
+function toarr2(marker, a, b, c) {
+ var x = 0;
+ return uninlinable(uninlinable(0, 0),
+ x = toarr_inner(marker, a / 2, b / 2, c / 2)), x;
+}
+
+function test_toarr(toarr) {
+ var marker = { x: 0 };
+ assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+ assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+ %OptimizeFunctionOnNextCall(toarr);
+ assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+ delete marker.x;
+ assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+}
+
+test_toarr(toarr1);
+test_toarr(toarr2);
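
The new mjsunit test drives arguments access from toarr_inner when it is
inlined into toarr1 and toarr2: the empty try/catch in uninlinable
presumably keeps that helper from being inlined itself, and toarr2 routes
the result through a comma expression so the inlined frame's arguments
must be materialized while other values are live on the stack. The final
run after delete marker.x checks that optimized code still answers
correctly once the object's shape changes, which likely forces a
deoptimization.
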
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
index 50c8613..8d8f839 100644
--- a/test/mjsunit/harmony/proxies.js
+++ b/test/mjsunit/harmony/proxies.js
@@ -2257,3 +2257,22 @@
return function(k) { throw "myexn" }
}
}))
+
+
+
+// Constructor functions with proxy prototypes.
+
+function TestConstructorWithProxyPrototype() {
+ TestWithProxies(TestConstructorWithProxyPrototype2, {})
+}
+
+function TestConstructorWithProxyPrototype2(create, handler) {
+ function C() {};
+ C.prototype = create(handler);
+
+ var o = new C;
+ assertSame(C.prototype, o.__proto__);
+ assertSame(C.prototype, Object.getPrototypeOf(o));
+}
+
+TestConstructorWithProxyPrototype();
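
The regression test constructs instances of a function whose prototype is
a proxy and asserts that both __proto__ and Object.getPrototypeOf report
that proxy, for every proxy variant TestWithProxies supplies.
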
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index cc2925d..e64959a 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -69,6 +69,9 @@
ecma_3/Date/15.9.3.2-1: SKIP
js1_2/function/Number: SKIP
+# TODO(2018): Temporarily allow timeout in debug mode.
+js1_5/GC/regress-203278-2: PASS || TIMEOUT if $mode == debug
+
##################### SLOW TESTS #####################
# This takes a long time to run (~100 seconds). It should only be run
diff --git a/tools/merge-to-branch.sh b/tools/merge-to-branch.sh
index d75cafd..49bf3e4 100644
--- a/tools/merge-to-branch.sh
+++ b/tools/merge-to-branch.sh
@@ -48,6 +48,7 @@
OPTIONS:
-h Show this message
-s Specify the step where to start work. Default: 0.
+ -p Specify a patch file to apply as part of the merge
EOF
}
@@ -61,17 +62,19 @@
restore_patch_commit_hashes_if_unset() {
[[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && restore_patch_commit_hashes
- [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && \
+ [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && [[ -z "$EXTRA_PATCH" ]] && \
die "Variable PATCH_COMMIT_HASHES could not be restored."
}
########## Option parsing
-while getopts ":hs:f" OPTION ; do
+while getopts ":hs:fp:" OPTION ; do
case $OPTION in
h) usage
exit 0
;;
+ p) EXTRA_PATCH=$OPTARG
+ ;;
f) rm -f "$ALREADY_MERGING_SENTINEL_FILE"
;;
s) START_STEP=$OPTARG
@@ -95,6 +98,9 @@
initial_environment_checks
if [ $START_STEP -le $CURRENT_STEP ] ; then
+ if [ ${#@} -lt 2 ] && [ -z "$EXTRA_PATCH" ] ; then
+ die "Either a patch file or revision numbers must be specified"
+ fi
echo ">>> Step $CURRENT_STEP: Preparation"
MERGE_TO_BRANCH=$1
[[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
@@ -121,11 +127,15 @@
[[ -n "$NEXT_HASH" ]] \
|| die "Cannot determine git hash for r$REVISION"
PATCH_COMMIT_HASHES[$current]="$NEXT_HASH"
- [[ -n "$NEW_COMMIT_MSG" ]] && NEW_COMMIT_MSG="$NEW_COMMIT_MSG,"
- NEW_COMMIT_MSG="$NEW_COMMIT_MSG r$REVISION"
+ [[ -n "$REVISION_LIST" ]] && REVISION_LIST="$REVISION_LIST,"
+ REVISION_LIST="$REVISION_LIST r$REVISION"
let current+=1
done
- NEW_COMMIT_MSG="Merged$NEW_COMMIT_MSG into $MERGE_TO_BRANCH branch."
+ if [ -z "$REVISION_LIST" ] ; then
+ NEW_COMMIT_MSG="Applied patch to $MERGE_TO_BRANCH branch."
+ else
+ NEW_COMMIT_MSG="Merged$REVISION_LIST into $MERGE_TO_BRANCH branch."
+ fi;
echo "$NEW_COMMIT_MSG" > $COMMITMSG_FILE
echo "" >> $COMMITMSG_FILE
@@ -145,6 +155,7 @@
echo "BUG=$BUG_AGGREGATE" >> $COMMITMSG_FILE
fi
persist "NEW_COMMIT_MSG"
+ persist "REVISION_LIST"
persist_patch_commit_hashes
fi
@@ -159,6 +170,9 @@
git log -1 -p $HASH > "$TEMPORARY_PATCH_FILE"
apply_patch "$TEMPORARY_PATCH_FILE"
done
+ if [ -n "$EXTRA_PATCH" ] ; then
+ apply_patch "$EXTRA_PATCH"
+ fi
stage_files
fi
@@ -234,10 +248,20 @@
https://v8.googlecode.com/svn/$TO_URL \
https://v8.googlecode.com/svn/tags/$NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH \
-m "Tagging version $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+ persist "TO_URL"
fi
let CURRENT_STEP+=1
if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Cleanup."
+ restore_if_unset "SVN_REVISION"
+ restore_if_unset "TO_URL"
+ restore_if_unset "REVISION_LIST"
+ restore_version_if_unset "NEW"
common_cleanup
+ echo "*** SUMMARY ***"
+ echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+ echo "branch: $TO_URL"
+ echo "svn revision: $SVN_REVISION"
+ [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
fi
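
The -p flag threads an extra patch file through the whole merge flow: the
argument check is relaxed so revision numbers become optional when a patch
is supplied, the patch is applied after any revision patches, and the
commit message falls back to the "Applied patch..." form when no revision
list exists. The new cleanup step then echoes a summary of version,
branch, svn revision and merged patches. Something like
tools/merge-to-branch.sh -p fix.patch <branch> should therefore be a valid
invocation now (fix.patch is an illustrative name).
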
diff --git a/tools/push-to-trunk.sh b/tools/push-to-trunk.sh
index 42d4e4b..3fb5b34 100755
--- a/tools/push-to-trunk.sh
+++ b/tools/push-to-trunk.sh
@@ -359,9 +359,9 @@
git commit -am "Update V8 to version $MAJOR.$MINOR.$BUILD.
TBR=$REVIEWER" || die "'git commit' failed."
- git cl upload --send-mail --use-commit-queue \
+ git cl upload --send-mail \
|| die "'git cl upload' failed, please try again."
- echo "CL uploaded and sent to commit queue."
+ echo "CL uploaded."
fi
let CURRENT_STEP+=1