Version 3.15.9
Fixed candidate eviction in code flusher. (Chromium issue 159140)
Iterate through all arguments for side effects in Math.min/max. (issue 2444)
Fixed spec violations related to regexp.lastIndex. (issue 2437, issue 2438)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@13155 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index a30bfd5..d028758 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -758,7 +758,7 @@
//
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) {
Object* current = receiver->GetPrototype();
while (current->IsJSObject() &&
JSObject::cast(current)->map()->is_hidden_prototype()) {
@@ -768,12 +768,36 @@
}
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
- Object* value,
+MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+ return GetPrototypeSkipHiddenPrototypes(receiver);
+}
+
+
+MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
+ Object* value_raw,
void*) {
- const bool skip_hidden_prototypes = true;
+ const bool kSkipHiddenPrototypes = true;
// To be consistent with other Set functions, return the value.
- return receiver->SetPrototype(value, skip_hidden_prototypes);
+ if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
+ return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
+
+ Isolate* isolate = receiver_raw->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver(receiver_raw);
+ Handle<Object> value(value_raw);
+ Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(*receiver));
+
+ MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(*receiver));
+ if (!new_value->SameValue(*old_value)) {
+ JSObject::EnqueueChangeRecord(receiver, "prototype",
+ isolate->factory()->Proto_symbol(),
+ old_value);
+ }
+ return *hresult;
}
diff --git a/src/api.cc b/src/api.cc
index 39741f5..0c3460e 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -630,7 +630,16 @@
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "MakeWeak");
isolate->global_handles()->MakeWeak(object, parameters,
- callback);
+ callback);
+}
+
+
+void V8::MakeWeak(i::Isolate* isolate, i::Object** object,
+ void* parameters, WeakReferenceCallback callback) {
+ ASSERT(isolate == i::Isolate::Current());
+ LOG_API(isolate, "MakeWeak");
+ isolate->global_handles()->MakeWeak(object, parameters,
+ callback);
}
@@ -701,6 +710,14 @@
}
+bool V8::IsGlobalWeak(i::Isolate* isolate, i::Object** obj) {
+ ASSERT(isolate == i::Isolate::Current());
+ LOG_API(isolate, "IsGlobalWeak");
+ if (!isolate->IsInitialized()) return false;
+ return i::GlobalHandles::IsWeak(obj);
+}
+
+
void V8::DisposeGlobal(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "DisposeGlobal");
@@ -769,6 +786,12 @@
}
+i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+ ASSERT(isolate == i::Isolate::Current());
+ return i::HandleScope::CreateHandle(value, isolate);
+}
+
+
i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
ASSERT(value->IsHeapObject());
return reinterpret_cast<i::Object**>(
@@ -5384,6 +5407,7 @@
isolate->SetFailedAccessCheckCallback(callback);
}
+
void V8::AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
@@ -5395,6 +5419,19 @@
}
+void V8::AddObjectGroup(Isolate* exportedIsolate,
+ Persistent<Value>* objects,
+ size_t length,
+ RetainedObjectInfo* info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exportedIsolate);
+ ASSERT(isolate == i::Isolate::Current());
+ if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
+ STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
+ isolate->global_handles()->AddObjectGroup(
+ reinterpret_cast<i::Object***>(objects), length, info);
+}
+
+
void V8::AddImplicitReferences(Persistent<Object> parent,
Persistent<Value>* children,
size_t length) {
@@ -6405,7 +6442,8 @@
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
- ActivityControl* control) {
+ ActivityControl* control,
+ ObjectNameResolver* resolver) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
@@ -6418,7 +6456,7 @@
}
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(
- *Utils::OpenHandle(*title), internal_type, control));
+ *Utils::OpenHandle(*title), internal_type, control, resolver));
}
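
The new ObjectNameResolver parameter lets an embedder attach its own names to
objects in a heap snapshot (e.g. labelling a global object with the URL of the
document it belongs to). A minimal embedder-side sketch follows; the nested
ObjectNameResolver interface and its GetName() hook are assumptions about the
public v8-profiler.h declaration, inferred from the parameter added above
rather than shown in this diff.

    // Hypothetical embedder code; the GetName() signature is assumed.
    #include "v8.h"
    #include "v8-profiler.h"

    class UrlNameResolver : public v8::HeapProfiler::ObjectNameResolver {
     public:
      virtual const char* GetName(v8::Handle<v8::Object> object) {
        // Return a human-readable label for this object, e.g. a document URL.
        return "Window / http://example.com/";
      }
    };

    void TakeLabelledSnapshot() {
      v8::HandleScope scope;
      UrlNameResolver resolver;
      const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
          v8::String::New("labelled"),
          v8::HeapSnapshot::kFull,
          NULL,         // no ActivityControl
          &resolver);   // new: resolver supplies object names
      (void) snapshot;
    }
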
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index acd61fe..a31b7e9 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -47,6 +47,33 @@
namespace internal {
+int Register::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return kMaxNumAllocatableRegisters;
+ } else {
+ return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
+ }
+}
+
+
+int DwVfpRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return DwVfpRegister::kNumRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
+int DwVfpRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return DwVfpRegister::kMaxNumAllocatableRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 47ea0e2..52edb39 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -85,6 +85,33 @@
}
+const char* DwVfpRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "d0",
+ "d1",
+ "d2",
+ "d3",
+ "d4",
+ "d5",
+ "d6",
+ "d7",
+ "d8",
+ "d9",
+ "d10",
+ "d11",
+ "d12",
+ "d13"
+ };
+ return names[index];
+ } else {
+ ASSERT(index == 0);
+ return "sfpd0";
+ }
+}
+
+
void CpuFeatures::Probe() {
unsigned standard_features = static_cast<unsigned>(
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 3b9bb80..a361c7e 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -71,21 +71,24 @@
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
+ static const int kGPRsPerNonVFP2Double = 2;
+
+ inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kNumAllocatableRegisters);
+ ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
@@ -165,7 +168,6 @@
const Register lr = { kRegister_lr_Code };
const Register pc = { kRegister_pc_Code };
-
// Single word VFP register.
struct SwVfpRegister {
bool is_valid() const { return 0 <= code_ && code_ < 32; }
@@ -196,37 +198,19 @@
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters -
+ static const int kMaxNumAllocatableRegisters = kNumRegisters -
kNumReservedRegisters;
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
+ static const char* AllocationIndexToString(int index);
static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "d0",
- "d1",
- "d2",
- "d3",
- "d4",
- "d5",
- "d6",
- "d7",
- "d8",
- "d9",
- "d10",
- "d11",
- "d12",
- "d13"
- };
- return names[index];
- }
-
static DwVfpRegister from_code(int code) {
DwVfpRegister r = { code };
return r;
@@ -323,6 +307,9 @@
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
+const Register sfpd_lo = { kRegister_r6_Code };
+const Register sfpd_hi = { kRegister_r7_Code };
+
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
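
The sfpd_lo/sfpd_hi aliases give the non-VFP2 code paths a fixed pair of core
registers (r6/r7) in which to carry a double value. A small host-side sketch of
the word split they hold (illustration only; it assumes the little-endian word
order of the ARM targets V8 generates code for):

    #include <cstdint>
    #include <cstring>

    // Split an IEEE 754 double into the two 32-bit words carried by the
    // sfpd_lo/sfpd_hi pair: lo holds the low mantissa bits, hi holds the sign,
    // exponent and top 20 mantissa bits (which is why the hole-NaN check later
    // in this patch compares sfpd_hi against kHoleNanUpper32).
    void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }
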
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 24d14e8..28e00dd 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1259,6 +1259,26 @@
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyICMiss, 0);
+ __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ mov(ip, lr); // Stash the miss continuation
+ __ add(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ pop(lr); // Restore LR to continuation in JSFunction
+ __ mov(pc, ip); // Jump to miss handler
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 9484f85..a571f9f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -37,6 +37,17 @@
namespace internal {
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ isolate->builtins()->KeyedLoadIC_Miss();
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -503,7 +514,7 @@
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -3568,10 +3579,10 @@
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
+ const DwVfpRegister double_base = d1;
+ const DwVfpRegister double_exponent = d2;
+ const DwVfpRegister double_result = d3;
+ const DwVfpRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
const Register scratch2 = r7;
@@ -3781,12 +3792,29 @@
void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot; detect that and don't
+ // regenerate, which would leave the code stub initialization state
+ // inconsistent.
+ Code* save_doubles_code = NULL;
+ Code* store_buffer_overflow_code = NULL;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope2(VFP2);
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ } else {
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ ISOLATE->set_fp_stubs_generated(true);
}
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 0443cf7..6f964a8 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -36,7 +36,7 @@
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -58,7 +58,7 @@
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -77,7 +77,7 @@
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -219,7 +219,7 @@
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -242,7 +242,7 @@
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -255,7 +255,7 @@
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -295,7 +295,7 @@
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -329,7 +329,7 @@
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -355,7 +355,7 @@
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -511,7 +511,7 @@
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@@ -570,7 +570,7 @@
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@@ -589,7 +589,7 @@
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@@ -739,7 +739,7 @@
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 300772a..5e8739c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -73,10 +73,10 @@
{
CpuFeatures::Scope use_vfp(VFP2);
- DoubleRegister input = d0;
- DoubleRegister result = d1;
- DoubleRegister double_scratch1 = d2;
- DoubleRegister double_scratch2 = d3;
+ DwVfpRegister input = d0;
+ DwVfpRegister result = d1;
+ DwVfpRegister double_scratch1 = d2;
+ DwVfpRegister double_scratch2 = d3;
Register temp1 = r4;
Register temp2 = r5;
Register temp3 = r6;
@@ -521,16 +521,60 @@
}
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ tst(index, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi index");
+ __ tst(value, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi value");
+
+ __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ __ cmp(index, ip);
+ __ Check(lt, "Index is too large");
+
+ __ cmp(index, Operand(Smi::FromInt(0)));
+ __ Check(ge, "Index is negative");
+
+ __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, "Unexpected string type");
+ }
+
+ __ add(ip,
+ string,
+ Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(value, value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
+ __ strb(value, MemOperand(ip, index, LSR, 1));
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ strh(value, MemOperand(ip, index));
+ }
+}
+
+
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3) {
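
A note on the addressing in SeqStringSetCharGenerator above: the index stays
smi-tagged (the logical index shifted left by one), so the one-byte store
shifts the tag off inside the address mode (LSR #1), while the two-byte store
uses the smi directly because the two-byte element size exactly cancels the
tag shift. A host-side sketch of the equivalent arithmetic (illustration only,
not V8 code):

    #include <cstdint>
    #include <cstring>

    void SetCharViaSmiIndex(uint8_t* one_byte_chars, uint8_t* two_byte_chars,
                            int32_t smi_index,  // logical index << 1
                            uint16_t value) {
      // One-byte string: byte offset equals the logical index, so drop the tag.
      one_byte_chars[smi_index >> 1] = static_cast<uint8_t>(value);
      // Two-byte string: byte offset is logical index * 2, i.e. the smi itself,
      // so no untagging is needed.
      std::memcpy(two_byte_chars + smi_index, &value, sizeof(value));
    }
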
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 8f0033e..75899a9 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,6 +44,10 @@
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -68,6 +72,8 @@
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -92,10 +98,10 @@
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 19667b9..84acba2 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -222,7 +222,7 @@
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -256,7 +256,7 @@
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -348,7 +348,7 @@
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -461,6 +461,70 @@
}
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO <-fp
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+<-sp
+ // | | saved frame (fp) |
+ // | +=========================+<-fp
+ // | | JSFunction context |
+ // v +-------------------------+
+ // | COMPILED_STUB marker | fp = saved frame
+ // +-------------------------+ f8 = JSFunction context
+ // | |
+ // | ... |
+ // | |
+ // +-------------------------+<-sp
+ //
+ //
+ int output_frame_size = 1 * kPointerSize;
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, 0);
+ Code* notify_miss =
+ isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+ Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+ unsigned input_frame_size = input_->GetFrameSize();
+ intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(0, value);
+ value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+ output_frame->SetRegister(fp.code(), value);
+ output_frame->SetFp(value);
+ value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+ output_frame->SetRegister(cp.code(), value);
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ ASSERT(opcode == Translation::REGISTER);
+ USE(opcode);
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(r1.code(), input_value);
+
+ int32_t next = iterator->Next();
+ opcode = static_cast<Translation::Opcode>(next);
+ ASSERT(opcode == Translation::REGISTER);
+ input_reg = iterator->Next();
+ input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(r0.code(), input_value);
+
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+}
+
+
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@@ -888,7 +952,7 @@
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -908,7 +972,6 @@
Isolate* isolate = masm()->isolate();
- CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -916,23 +979,29 @@
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+ kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
- // Save all VFP registers before messing with them.
- DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
- DwVfpRegister last =
- DwVfpRegister::FromAllocationIndex(
- DwVfpRegister::kNumAllocatableRegisters - 1);
- ASSERT(last.code() > first.code());
- ASSERT((last.code() - first.code()) ==
- (DwVfpRegister::kNumAllocatableRegisters - 1));
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Save all VFP registers before messing with them.
+ DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+ DwVfpRegister last =
+ DwVfpRegister::FromAllocationIndex(
+ DwVfpRegister::kMaxNumAllocatableRegisters - 1);
+ ASSERT(last.code() > first.code());
+ ASSERT((last.code() - first.code()) ==
+ (DwVfpRegister::kMaxNumAllocatableRegisters - 1));
#ifdef DEBUG
- for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
- ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
- (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
- }
+ int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
+ for (int i = 0; i <= max; i++) {
+ ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+ (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
+ }
#endif
- __ vstm(db_w, sp, first, last);
+ __ vstm(db_w, sp, first, last);
+ } else {
+ __ sub(sp, sp, Operand(kDoubleRegsSize));
+ }
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -991,14 +1060,17 @@
__ str(r2, MemOperand(r1, offset));
}
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Copy VFP registers to
+ // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ vldr(d0, sp, src_offset);
+ __ vstr(d0, r1, dst_offset);
+ }
}
// Remove the bailout id, eventually return address, and the saved registers
@@ -1019,10 +1091,13 @@
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
@@ -1039,24 +1114,29 @@
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: r0 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
__ add(r1, r0, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r0, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
+ __ bind(&inner_loop_header);
__ cmp(r3, Operand(0));
__ b(ne, &inner_push_loop); // test for gt?
__ add(r0, r0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index b0df846..631e101 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -3142,6 +3142,39 @@
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(r2);
+ __ pop(r1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(r2);
+ __ pop(r1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
+ context()->Plug(r0);
+}
+
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 32dda27..f44ca98 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -42,10 +42,10 @@
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -612,6 +612,7 @@
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -1646,6 +1647,16 @@
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1684,6 +1695,7 @@
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1708,6 +1720,7 @@
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1727,6 +1740,7 @@
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1964,7 +1978,16 @@
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
+ // float->double conversion on non-VFP2 requires an extra scratch
+ // register. For convenience, just mark the elements register as "UseTemp"
+ // so that it can be reused as a temp for the float->double conversion
+ // once the float load no longer needs it.
+ bool needs_temp =
+ !CpuFeatures::IsSupported(VFP2) &&
+ (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+ LOperand* external_pointer = needs_temp
+ ? UseTempRegister(instr->elements())
+ : UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@@ -2182,8 +2205,17 @@
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
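
The "extra scratch register" noted in DoLoadKeyed above serves the software
float->double widening that the non-VFP2 path performs with integer
instructions (see the EXTERNAL_FLOAT_ELEMENTS branch added to
lithium-codegen-arm.cc further down): the 8-bit exponent is rebiased to the
11-bit binary64 exponent and the 23-bit mantissa is shifted into the hi/lo
word pair. A host-side sketch of that bit manipulation (illustration only;
like the generated code it leaves a zero exponent unchanged):

    #include <cstdint>
    #include <cstring>

    void Float32ToDoubleWords(float f, uint32_t* lo, uint32_t* hi) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      uint32_t sign     = bits & 0x80000000u;            // kBinary32SignMask
      uint32_t exponent = (bits >> 23) & 0xffu;
      uint32_t mantissa = bits & 0x007fffffu;            // kBinary32MantissaMask
      if (exponent == 0xffu) {
        exponent = 0x7ffu;                               // Infinity / NaN
      } else if (exponent != 0u) {
        exponent += 1023u - 127u;                        // rebias normal numbers
      }                                                  // zero stays zero
      *hi = sign | (exponent << 20) | (mantissa >> 3);   // shift for hi word
      *lo = mantissa << 29;                              // shift for lo word
    }
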
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index b45a3e0..1b589ce 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -150,6 +150,7 @@
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -255,6 +256,11 @@
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
+ // Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
virtual bool HasResult() const = 0;
@@ -1198,6 +1204,30 @@
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -2334,8 +2364,9 @@
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 515a0d0..1cfa8e9 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -65,8 +65,6 @@
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope1(VFP3);
- CpuFeatures::Scope scope2(ARMv7);
CodeStub::GenerateFPStubs();
@@ -118,37 +116,38 @@
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
+ // r1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ cmp(r5, Operand(0));
+ __ b(eq, &ok);
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ str(r2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
}
-
info()->set_prologue_offset(masm_->pc_offset());
- {
+ if (NeedsEagerFrame()) {
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
@@ -159,6 +158,7 @@
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
+ frame_is_built_ = true;
}
// Reserve space for the stack slots needed by the code.
@@ -178,7 +178,7 @@
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -214,7 +214,7 @@
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -272,10 +272,31 @@
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ __ pop(ip);
+ __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -297,24 +318,68 @@
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 2)) {
+ deopt_jump_table_.length() * 7)) {
Abort("Generated code is too large");
}
- // Block the constant pool emission during the jump table emission.
- __ BlockConstPoolFor(deopt_jump_table_.length());
__ RecordComment("[ Deoptimisation jump table");
Label table_start;
__ bind(&table_start);
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
- __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
- __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ Address entry = deopt_jump_table_[i].address;
+ if (deopt_jump_table_[i].needs_frame) {
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (deopt_jump_table_[i].is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ b(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, ip);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ b(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(pc, ip);
+ }
+ }
+ } else {
+ if (deopt_jump_table_[i].is_lazy_deopt) {
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ } else {
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ }
+ }
+ masm()->CheckConstPool(false, false);
}
- ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
- deopt_jump_table_.length() * 2);
__ RecordComment("]");
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
+
// The deoptimization jump table is the last part of the instruction
// sequence. Mark the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
@@ -334,8 +399,8 @@
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DwVfpRegister::FromAllocationIndex(index);
}
@@ -376,15 +441,15 @@
}
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch) {
if (op->IsDoubleRegister()) {
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
@@ -520,7 +585,9 @@
translation,
arguments_index,
arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -541,6 +608,9 @@
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
@@ -736,7 +806,11 @@
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -752,14 +826,19 @@
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
- if (cc == al) {
+ bool needs_lazy_deopt = info()->IsStub();
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (cc == al && !needs_lazy_deopt) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry), zone());
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ deopt_jump_table_.Add(table_entry, zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
@@ -1368,6 +1447,7 @@
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
+ CpuFeatures::Scope vfp_scope(VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
@@ -1653,6 +1733,7 @@
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@@ -1764,6 +1845,15 @@
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -1821,9 +1911,10 @@
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- DoubleRegister left_reg = ToDoubleRegister(left);
- DoubleRegister right_reg = ToDoubleRegister(right);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister left_reg = ToDoubleRegister(left);
+ DwVfpRegister right_reg = ToDoubleRegister(right);
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
Label check_nan_left, check_zero, return_left, return_right, done;
__ VFPCompareAndSetFlags(left_reg, right_reg);
__ b(vs, &check_nan_left);
@@ -1866,9 +1957,10 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister left = ToDoubleRegister(instr->left());
+ DwVfpRegister right = ToDoubleRegister(instr->right());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
__ vadd(result, left, right);
@@ -1956,7 +2048,8 @@
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
@@ -2041,8 +2134,9 @@
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ CpuFeatures::Scope scope(VFP2);
// heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, ¬_heap_number);
@@ -2120,6 +2214,7 @@
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ CpuFeatures::Scope scope(VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2658,16 +2753,21 @@
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
+ if (NeedsEagerFrame()) {
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ if (info()->IsStub()) {
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
__ Jump(lr);
}
@@ -3017,17 +3117,63 @@
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, result.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(result.low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, result.low());
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), additional_offset);
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ Register value = external_pointer;
+ __ ldr(value, MemOperand(scratch0(), additional_offset));
+ __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+ __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
+ __ and_(scratch0(), scratch0(),
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ teq(scratch0(), Operand(0x00));
+ __ b(eq, &exponent_rebiased);
+
+ __ teq(scratch0(), Operand(0xff));
+ __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
+ __ b(eq, &exponent_rebiased);
+
+ // Rebias exponent.
+ __ add(scratch0(),
+ scratch0(),
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
+ __ orr(sfpd_hi, sfpd_hi,
+ Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ orr(sfpd_hi, sfpd_hi,
+ Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
+ __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
+
+ } else {
+ __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
+ __ ldr(sfpd_hi, MemOperand(scratch0(),
+ additional_offset + kPointerSize));
+ }
}
} else {
Register result = ToRegister(instr->result());
@@ -3096,23 +3242,28 @@
key = ToRegister(instr->key());
}
- Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(elements, elements, operand);
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
+ __ add(elements, elements, Operand(key, LSL, shift_size));
}
-
- __ vldr(result, elements, 0);
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ add(elements, elements, Operand(base_offset));
+ __ vldr(result, elements, 0);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ } else {
+ __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+ __ ldr(sfpd_lo, MemOperand(elements, base_offset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
}
}
@@ -3548,6 +3699,7 @@
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(VFP2);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
@@ -3584,7 +3736,8 @@
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3609,7 +3762,8 @@
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
@@ -3674,16 +3828,18 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister temp = ToDoubleRegister(instr->temp());
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
@@ -3702,6 +3858,7 @@
void LCodeGen::DoPower(LPower* instr) {
+ CpuFeatures::Scope scope(VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@@ -3734,6 +3891,7 @@
void LCodeGen::DoRandom(LRandom* instr) {
+ CpuFeatures::Scope scope(VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3812,10 +3970,11 @@
void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DwVfpRegister double_scratch2 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
@@ -4101,6 +4260,7 @@
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@@ -4171,6 +4331,7 @@
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@@ -4447,6 +4608,7 @@
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@@ -4464,6 +4626,7 @@
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4525,13 +4688,49 @@
}
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+ masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+ masm->orr(hiword, scratch,
+ Operand(hiword, LSR, mantissa_shift_for_hi_word));
+ } else {
+ masm->mov(loword, Operand(0, RelocInfo::NONE));
+ masm->orr(hiword, scratch,
+ Operand(hiword, LSL, -mantissa_shift_for_hi_word));
+ }
+
+  // If the least significant bit of the biased exponent was not 1, it was
+  // corrupted by the most significant bit of the mantissa, so we should fix it.
+ if (!(biased_exponent & 1)) {
+ masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+ }
+}
+
+
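As a sanity check on the bit layout GenerateUInt2Double assembles, here is a minimal host-side model (an illustrative sketch, not code from the patch): it builds the IEEE 754 encoding of a non-zero uint32 with a known number of leading zeros using 64-bit shifts, so it does not need the exponent-LSB fixup that the hiword/loword packing above requires.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Build the IEEE 754 double for a non-zero 32-bit unsigned value whose number
// of leading zero bits is known in advance (mirrors the constants above:
// meaningful_bits and an exponent bias of 1023).
static double UInt2Double(uint32_t value, int leading_zeroes) {
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const uint64_t biased_exponent = 1023 + meaningful_bits;
  // Drop the implicit leading 1 and left-align the rest in the 52-bit mantissa.
  const uint64_t mantissa =
      (static_cast<uint64_t>(value) - (1ull << meaningful_bits))
      << (52 - meaningful_bits);
  const uint64_t bits = (biased_exponent << 52) | mantissa;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  std::printf("%f\n", UInt2Double(0x80000000u, 0));  // 2147483648.000000
  std::printf("%f\n", UInt2Double(0x40000001u, 1));  // 1073741825.000000
  return 0;
}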
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
@@ -4546,16 +4745,40 @@
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ } else {
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
+ sfpd_lo, sfpd_hi,
+ scratch0(), s0);
+ }
} else {
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ } else {
+ Label no_leading_zero, done;
+ __ tst(src, Operand(0x80000000));
+ __ b(ne, &no_leading_zero);
+
+      // Integer has one leading zero.
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
+ __ b(&done);
+
+ __ bind(&no_leading_zero);
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
+ __ b(&done);
+ }
}
if (FLAG_inline_new) {
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@@ -4575,7 +4798,13 @@
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ } else {
+ __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+ __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
+ }
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
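The non-VFP2 path above relies on the in-memory word order of a double: on a little-endian ARM target the low word holds the mantissa bits and the high word holds sign and exponent, which is why sfpd_lo goes to kMantissaOffset and sfpd_hi to kExponentOffset. A quick host-side check of that assumption:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.5;  // encoded as 0x3FF8000000000000
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(d));
  // On a little-endian target: lo = 0x00000000 (mantissa word),
  //                            hi = 0x3ff80000 (sign/exponent word).
  std::printf("lo=0x%08x hi=0x%08x\n", words[0], words[1]);
  return 0;
}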
@@ -4592,7 +4821,7 @@
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
Register temp1 = ToRegister(instr->temp());
@@ -4608,7 +4837,13 @@
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ } else {
+ __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+ __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+ }
  // Now that we have finished with the object's real address, tag it.
__ add(reg, reg, Operand(kHeapObjectTag));
}
@@ -4649,13 +4884,14 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
+ DwVfpRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
+ CpuFeatures::Scope scope(VFP2);
Label load_smi, heap_number, done;
@@ -4730,6 +4966,7 @@
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
+ CpuFeatures::Scope scope(VFP2);
Register scratch3 = ToRegister(instr->temp2());
SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
@@ -4821,7 +5058,7 @@
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
+ DwVfpRegister result_reg = ToDoubleRegister(result);
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
@@ -4970,14 +5207,16 @@
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ CpuFeatures::Scope vfp_scope(VFP2);
+ DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -4985,10 +5224,11 @@
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -5565,6 +5805,7 @@
void LCodeGen::EnsureSpaceForLazyDeopt() {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 921285b..e7afcbf 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -61,6 +61,7 @@
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +77,15 @@
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@@ -84,12 +94,12 @@
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
+ DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
+ DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
@@ -193,7 +203,7 @@
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -275,7 +285,7 @@
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
+ DwVfpRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -308,7 +318,7 @@
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
- DoubleRegister result,
+ DwVfpRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
@@ -369,11 +379,15 @@
LEnvironment* environment);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
@@ -402,6 +416,7 @@
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -417,6 +432,7 @@
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index c100720..4df1338 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -171,8 +171,10 @@
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -192,8 +194,10 @@
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@@ -229,7 +233,8 @@
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
+ CpuFeatures::Scope scope(VFP2);
+ // ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
@@ -267,7 +272,8 @@
}
} else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
@@ -276,7 +282,8 @@
}
} else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
+ CpuFeatures::Scope scope(VFP2);
+ MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 5c064c1..067a05d 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -290,7 +290,7 @@
}
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
@@ -643,19 +643,19 @@
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
PopSafepointRegisters();
}
@@ -691,7 +691,7 @@
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -967,7 +967,7 @@
}
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
@@ -2717,7 +2717,10 @@
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
CallStub(&stub);
}
@@ -3393,9 +3396,9 @@
if (use_eabi_hardfloat()) {
// In the hard floating point calling convention, we can use
// all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ if (num_double_arguments > DoubleRegister::NumRegisters()) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ 2 * (num_double_arguments - DoubleRegister::NumRegisters());
}
} else {
// In the soft floating point calling convention, every double
@@ -3436,7 +3439,7 @@
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
@@ -3446,8 +3449,8 @@
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+ DwVfpRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
@@ -3465,7 +3468,7 @@
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
@@ -3748,8 +3751,8 @@
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg) {
Label above_zero;
Label done;
Label in_bounds;
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 3c05e00..50c298b 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -178,7 +178,7 @@
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
- void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(DwVfpRegister dst, DwVfpRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
@@ -1058,9 +1058,9 @@
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
+ void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1076,7 +1076,7 @@
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void GetCFunctionDoubleResult(const DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
@@ -1289,8 +1289,8 @@
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1365,9 +1365,9 @@
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class CompiledFrame;
};
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 4604c33..e79c520 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1053,42 +1053,6 @@
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -3319,9 +3283,17 @@
// -- r1 : receiver
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode();
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -3726,339 +3698,6 @@
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r3: elements array
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 2));
- __ vldr(d0, r2, 0);
- } else {
- __ add(r4, r3, Operand(key, LSL, 2));
- // r4: pointer to the beginning of the double we want to load.
- __ ldr(r2, MemOperand(r4, 0));
- __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For float array type:
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
- // For double array type:
- // d0: value (if VFP3 is supported)
- // r2/r3: value (if VFP3 is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ add(r0, r5, Operand(kHeapObjectTag));
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r5, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
- Register dst_mantissa = r1;
- Register dst_exponent = r3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- d0,
- dst_mantissa,
- dst_exponent,
- r9,
- s0);
- __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-
- __ vcvt_f64_u32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vcvt_f64_f32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
- // VFP is not available, do manual single to double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
-
- __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
- __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ mov(r0, r4);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -4403,118 +4042,6 @@
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss_force_generic);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = r0;
- Register receiver_reg = r1;
- Register elements_reg = r2;
- Register heap_number_reg = r2;
- Register indexed_double_offset = r3;
- Register scratch = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register heap_number_map = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(key_reg, Operand(scratch));
- __ b(hs, &miss_force_generic);
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ add(indexed_double_offset, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- __ b(&miss_force_generic, eq);
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ ldr(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(r0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
diff --git a/src/assembler.cc b/src/assembler.cc
index 25157be..ccaf290 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1375,6 +1375,11 @@
}
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+ return ExternalReference(entry);
+}
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
diff --git a/src/assembler.h b/src/assembler.h
index 4639374..111c1d9 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -736,6 +736,8 @@
static ExternalReference page_flags(Page* page);
+ static ExternalReference ForDeoptEntry(Address entry);
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/ast.cc b/src/ast.cc
index 232cb73..c43b913 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -616,14 +616,6 @@
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
-bool AstVisitor::CheckStackOverflow() {
- if (stack_overflow_) return true;
- StackLimitCheck check(isolate_);
- if (!check.HasOverflowed()) return false;
- return (stack_overflow_ = true);
-}
-
-
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
diff --git a/src/ast.h b/src/ast.h
index d299f19..a0a7a73 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -2492,42 +2492,53 @@
class AstVisitor BASE_EMBEDDED {
public:
- AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
+ AstVisitor() {}
virtual ~AstVisitor() { }
// Stack overflow check and dynamic dispatch.
- void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
+ virtual void Visit(AstNode* node) = 0;
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
- // Stack overflow tracking support.
- bool HasStackOverflow() const { return stack_overflow_; }
- bool CheckStackOverflow();
-
- // If a stack-overflow exception is encountered when visiting a
- // node, calling SetStackOverflow will make sure that the visitor
- // bails out without visiting more nodes.
- void SetStackOverflow() { stack_overflow_ = true; }
- void ClearStackOverflow() { stack_overflow_ = false; }
-
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
-
- protected:
- Isolate* isolate() { return isolate_; }
-
- private:
- Isolate* isolate_;
- bool stack_overflow_;
};
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
+public: \
+ virtual void Visit(AstNode* node) { \
+ if (!CheckStackOverflow()) node->Accept(this); \
+ } \
+ \
+ void SetStackOverflow() { stack_overflow_ = true; } \
+ void ClearStackOverflow() { stack_overflow_ = false; } \
+ bool HasStackOverflow() const { return stack_overflow_; } \
+ \
+ bool CheckStackOverflow() { \
+ if (stack_overflow_) return true; \
+ StackLimitCheck check(isolate_); \
+ if (!check.HasOverflowed()) return false; \
+ return (stack_overflow_ = true); \
+ } \
+ \
+private: \
+ void InitializeAstVisitor() { \
+ isolate_ = Isolate::Current(); \
+ stack_overflow_ = false; \
+ } \
+ Isolate* isolate() { return isolate_; } \
+ \
+ Isolate* isolate_; \
+ bool stack_overflow_
+
+
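With stack-overflow tracking moved out of the base class, each concrete visitor now embeds these members via the macro and initializes them itself. A hypothetical subclass sketch, assuming the ast.h declarations above (the class name and counting logic are invented for illustration):

class NodeCountingVisitor : public AstVisitor {
 public:
  NodeCountingVisitor() : count_(0) { InitializeAstVisitor(); }

  int CountStatements(ZoneList<Statement*>* statements) {
    VisitStatements(statements);
    // The macro-provided Visit() bails out once the stack limit is hit.
    return HasStackOverflow() ? -1 : count_;
  }

#define DEF_VISIT(type) \
  virtual void Visit##type(type* node) { count_++; }
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

 private:
  int count_;
  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};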
// ----------------------------------------------------------------------------
// Construction time visitor.
diff --git a/src/builtins.h b/src/builtins.h
index a2f752e..1ca4053 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -107,6 +107,8 @@
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(NotifyICMiss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
@@ -386,6 +388,7 @@
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
+ static void Generate_NotifyICMiss(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
new file mode 100644
index 0000000..74bd93f
--- /dev/null
+++ b/src/code-stubs-hydrogen.cc
@@ -0,0 +1,135 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "hydrogen.h"
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> HydrogenCodeStub::CodeFromGraph(HGraph* graph) {
+ graph->OrderBlocks();
+ graph->AssignDominators();
+ graph->CollectPhis();
+ graph->InsertRepresentationChanges();
+ graph->EliminateRedundantBoundsChecks();
+ LChunk* chunk = LChunk::NewChunk(graph);
+ ASSERT(chunk != NULL);
+ Handle<Code> stub = chunk->Codegen(Code::COMPILED_STUB);
+ return stub;
+}
+
+
+class CodeStubGraphBuilderBase : public HGraphBuilder {
+ public:
+ CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
+ : HGraphBuilder(&info_), info_(stub, isolate) {}
+ virtual bool BuildGraph();
+
+ protected:
+ virtual void BuildCodeStub() = 0;
+ HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
+ HydrogenCodeStub* stub() { return info_.code_stub(); }
+
+ private:
+ SmartArrayPointer<HParameter*> parameters_;
+ CompilationInfoWithZone info_;
+};
+
+
+bool CodeStubGraphBuilderBase::BuildGraph() {
+ if (FLAG_trace_hydrogen) {
+ PrintF("-----------------------------------------------------------\n");
+ PrintF("Compiling stub using hydrogen\n");
+ HTracer::Instance()->TraceCompilation(&info_);
+ }
+ HBasicBlock* next_block = graph()->CreateBasicBlock();
+ next_block->SetInitialEnvironment(graph()->start_environment());
+ HGoto* jump = new(zone()) HGoto(next_block);
+ graph()->entry_block()->Finish(jump);
+ set_current_block(next_block);
+
+ int major_key = stub()->MajorKey();
+ CodeStubInterfaceDescriptor* descriptor =
+ info_.isolate()->code_stub_interface_descriptor(major_key);
+ if (descriptor->register_param_count_ < 0) {
+ stub()->InitializeInterfaceDescriptor(info_.isolate(), descriptor);
+ }
+ parameters_.Reset(new HParameter*[descriptor->register_param_count_]);
+
+ HGraph* graph = this->graph();
+ Zone* zone = this->zone();
+ for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ HParameter* param = new(zone) HParameter(i);
+ AddInstruction(param);
+ graph->start_environment()->Push(param);
+ parameters_[i] = param;
+ }
+ AddSimulate(BailoutId::StubEntry());
+
+ BuildCodeStub();
+
+ return true;
+}
+
+template <class Stub>
+class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
+ public:
+ explicit CodeStubGraphBuilder(Stub* stub)
+ : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+
+ protected:
+ virtual void BuildCodeStub();
+ Stub* casted_stub() { return static_cast<Stub*>(stub()); }
+};
+
+
+template <>
+void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+
+ HInstruction* load = BuildUncheckedMonomorphicElementAccess(
+ GetParameter(0), GetParameter(1), NULL, NULL,
+ casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
+ AddInstruction(load);
+
+ HReturn* ret = new(zone) HReturn(load);
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+ CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
+ return CodeFromGraph(builder.CreateGraph());
+}
+
+
+} } // namespace v8::internal
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 276c87e..c7d4c80 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -48,20 +48,6 @@
}
-void CodeStub::GenerateCode(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- masm->isolate()->counters()->code_stubs()->Increment();
-
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(masm, false);
-
- // Generate the code for the stub.
- masm->set_generating_stub(true);
- NoCurrentFrameScope scope(masm);
- Generate(masm);
-}
-
-
SmartArrayPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -72,8 +58,7 @@
}
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
+void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@@ -87,6 +72,39 @@
}
+Handle<Code> PlatformCodeStub::GenerateCode() {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+
+ // Generate the new code.
+ MacroAssembler masm(isolate, NULL, 256);
+
+ {
+ // Update the static counter each time a new code stub is generated.
+ isolate->counters()->code_stubs()->Increment();
+
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(&masm, false);
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ NoCurrentFrameScope scope(&masm);
+ Generate(&masm);
+ }
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()), GetICState());
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ return new_object;
+}
+
+
Handle<Code> CodeStub::GetCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
@@ -102,23 +120,10 @@
{
HandleScope scope(isolate);
- // Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
- GenerateCode(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState());
- Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ Handle<Code> new_object = GenerateCode();
new_object->set_major_key(MajorKey());
FinishCode(new_object);
- RecordCodeGeneration(*new_object, &masm);
+ RecordCodeGeneration(*new_object, isolate);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@@ -416,36 +421,8 @@
}
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind_) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
- break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
- break;
- case DICTIONARY_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+ KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ae113f5..527abde 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -162,20 +162,29 @@
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out, Isolate* isolate);
+ // Returns information for computing the number key.
+ virtual Major MajorKey() = 0;
+ virtual int MinorKey() = 0;
+
protected:
static bool CanUseFPRegisters();
- private:
- // Nonvirtual wrapper around the stub-specific Generate function. Call
- // this function to set up the macro assembler and generate the code.
- void GenerateCode(MacroAssembler* masm);
-
// Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
+ virtual Handle<Code> GenerateCode() = 0;
+ // BinaryOpStub needs to override this.
+ virtual InlineCacheState GetICState() {
+ return UNINITIALIZED;
+ }
+
+ // Returns whether the code generated for this stub needs to be allocated as
+ // a fixed (non-moveable) code object.
+ virtual bool NeedsImmovableCode() { return false; }
+
+ private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
- void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+ void RecordCodeGeneration(Code* code, Isolate* isolate);
// Finish the code object after it has been generated.
virtual void FinishCode(Handle<Code> code) { }
@@ -184,18 +193,9 @@
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
- // Returns information for computing the number key.
- virtual Major MajorKey() = 0;
- virtual int MinorKey() = 0;
-
// BinaryOpStub needs to override this.
virtual int GetCodeKind();
- // BinaryOpStub needs to override this.
- virtual InlineCacheState GetICState() {
- return UNINITIALIZED;
- }
-
// Add the code to a specialized cache, specific to an individual
// stub type. Please note, this method must add the code object to a
// roots object, otherwise we will remove the code during GC.
@@ -213,10 +213,6 @@
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
-
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -232,6 +228,51 @@
};
+class PlatformCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode();
+
+ virtual int GetCodeKind() { return Code::STUB; }
+
+ protected:
+ // Generates the assembler code for the stub.
+ virtual void Generate(MacroAssembler* masm) = 0;
+};
+
+
+struct CodeStubInterfaceDescriptor {
+ CodeStubInterfaceDescriptor()
+ : register_param_count_(-1),
+ register_params_(NULL) { }
+ int register_param_count_;
+ Register* register_params_;
+ Handle<Code> deoptimization_handler_;
+};
+
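A descriptor is filled in per stub and per platform. As a rough, hypothetical sketch of what a port's InitializeInterfaceDescriptor could look like (the register choices and the miss handler below are illustrative assumptions, not taken from this patch):

void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // Assumed calling convention: receiver and key arrive in fixed registers.
  static Register registers[] = { r1, r0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  // Assumed miss target; the real handler is chosen by the port.
  descriptor->deoptimization_handler_ = isolate->builtins()->KeyedLoadIC_Miss();
}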
+
+class HGraph;
+struct Register;
+class HydrogenCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode() = 0;
+
+ virtual int GetCodeKind() { return Code::COMPILED_STUB; }
+
+ CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
+ return isolate->code_stub_interface_descriptor(MajorKey());
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) = 0;
+
+ protected:
+ Handle<Code> CodeFromGraph(HGraph* graph);
+};
+
+
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
@@ -289,7 +330,7 @@
};
-class StackCheckStub : public CodeStub {
+class StackCheckStub : public PlatformCodeStub {
public:
StackCheckStub() { }
@@ -301,7 +342,7 @@
};
-class InterruptStub : public CodeStub {
+class InterruptStub : public PlatformCodeStub {
public:
InterruptStub() { }
@@ -313,7 +354,7 @@
};
-class ToNumberStub: public CodeStub {
+class ToNumberStub: public PlatformCodeStub {
public:
ToNumberStub() { }
@@ -325,7 +366,7 @@
};
-class FastNewClosureStub : public CodeStub {
+class FastNewClosureStub : public PlatformCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode)
: language_mode_(language_mode) { }
@@ -341,7 +382,7 @@
};
-class FastNewContextStub : public CodeStub {
+class FastNewContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -359,7 +400,7 @@
};
-class FastNewBlockContextStub : public CodeStub {
+class FastNewBlockContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -377,7 +418,7 @@
};
-class FastCloneShallowArrayStub : public CodeStub {
+class FastCloneShallowArrayStub : public PlatformCodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
@@ -410,7 +451,7 @@
};
-class FastCloneShallowObjectStub : public CodeStub {
+class FastCloneShallowObjectStub : public PlatformCodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
@@ -430,7 +471,7 @@
};
-class InstanceofStub: public CodeStub {
+class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
kNoFlags = 0,
@@ -468,7 +509,7 @@
};
-class MathPowStub: public CodeStub {
+class MathPowStub: public PlatformCodeStub {
public:
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
@@ -484,7 +525,7 @@
};
-class BinaryOpStub: public CodeStub {
+class BinaryOpStub: public PlatformCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
@@ -600,7 +641,7 @@
};
-class ICCompareStub: public CodeStub {
+class ICCompareStub: public PlatformCodeStub {
public:
ICCompareStub(Token::Value op,
CompareIC::State left,
@@ -666,7 +707,7 @@
};
-class CEntryStub : public CodeStub {
+class CEntryStub : public PlatformCodeStub {
public:
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@@ -700,7 +741,7 @@
};
-class JSEntryStub : public CodeStub {
+class JSEntryStub : public PlatformCodeStub {
public:
JSEntryStub() { }
@@ -734,7 +775,7 @@
};
-class ArgumentsAccessStub: public CodeStub {
+class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
@@ -761,7 +802,7 @@
};
-class RegExpExecStub: public CodeStub {
+class RegExpExecStub: public PlatformCodeStub {
public:
RegExpExecStub() { }
@@ -773,7 +814,7 @@
};
-class RegExpConstructResultStub: public CodeStub {
+class RegExpConstructResultStub: public PlatformCodeStub {
public:
RegExpConstructResultStub() { }
@@ -785,7 +826,7 @@
};
-class CallFunctionStub: public CodeStub {
+class CallFunctionStub: public PlatformCodeStub {
public:
CallFunctionStub(int argc, CallFunctionFlags flags)
: argc_(argc), flags_(flags) { }
@@ -826,7 +867,7 @@
};
-class CallConstructStub: public CodeStub {
+class CallConstructStub: public PlatformCodeStub {
public:
explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
@@ -1017,25 +1058,54 @@
};
-class KeyedLoadElementStub : public CodeStub {
+class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
public:
- explicit KeyedLoadElementStub(ElementsKind elements_kind)
- : elements_kind_(elements_kind)
- { }
+ KeyedLoadDictionaryElementStub() {}
Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return elements_kind_; }
+ int MinorKey() { return DICTIONARY_ELEMENTS; }
void Generate(MacroAssembler* masm);
private:
- ElementsKind elements_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
};
-class KeyedStoreElementStub : public CodeStub {
+class KeyedLoadFastElementStub : public HydrogenCodeStub {
+ public:
+ KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+ bit_field_ = ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array);
+ }
+
+ Major MajorKey() { return KeyedLoadElement; }
+ int MinorKey() { return bit_field_; }
+
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class IsJSArrayBits: public BitField<bool, 8, 1> {};
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ uint32_t bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
+};
+
+
+class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
ElementsKind elements_kind,
@@ -1070,7 +1140,7 @@
};
-class ToBooleanStub: public CodeStub {
+class ToBooleanStub: public PlatformCodeStub {
public:
enum Type {
UNDEFINED,
@@ -1140,7 +1210,7 @@
};
-class ElementsTransitionAndStoreStub : public CodeStub {
+class ElementsTransitionAndStoreStub : public PlatformCodeStub {
public:
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsKind to,
@@ -1181,7 +1251,7 @@
};
-class StoreArrayLiteralElementStub : public CodeStub {
+class StoreArrayLiteralElementStub : public PlatformCodeStub {
public:
StoreArrayLiteralElementStub()
: fp_registers_(CanUseFPRegisters()) { }
@@ -1200,7 +1270,7 @@
};
-class ProfileEntryHookStub : public CodeStub {
+class ProfileEntryHookStub : public PlatformCodeStub {
public:
explicit ProfileEntryHookStub() {}
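
KeyedLoadFastElementStub above packs its MinorKey with V8's BitField helpers: ElementsKindBits occupies bits 0-7 and IsJSArrayBits occupies bit 8. The standalone sketch below shows the encode/decode arithmetic that packing relies on; the BitField template and the ElementsKind values here are simplified stand-ins for the real V8 definitions, not copies of them.

// Simplified stand-in for v8::internal::BitField; shift/size parameters mirror
// the stub's ElementsKindBits / IsJSArrayBits layout.
#include <cstdint>
#include <cstdio>

template <class T, int kShift, int kSize>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits >> kShift) & ((1u << kSize) - 1));
  }
};

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, DICTIONARY_ELEMENTS };  // illustrative values
typedef BitField<ElementsKind, 0, 8> ElementsKindBits;
typedef BitField<bool, 8, 1> IsJSArrayBits;

int main() {
  // Same packing as KeyedLoadFastElementStub's constructor / MinorKey().
  uint32_t minor_key = ElementsKindBits::encode(FAST_ELEMENTS) |
                       IsJSArrayBits::encode(true);
  std::printf("kind=%d is_js_array=%d\n",
              ElementsKindBits::decode(minor_key),
              IsJSArrayBits::decode(minor_key));
}
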
diff --git a/src/codegen.cc b/src/codegen.cc
index 83ac854..c8bdf68 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -121,19 +121,21 @@
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(function->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- function->end_position() - function->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
+ if (code->kind() != Code::COMPILED_STUB) {
+ Handle<Script> script = info->script();
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ StringInputBuffer stream(String::cast(script->source()));
+ stream.Seek(function->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ function->end_position() - function->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.has_more()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
}
- PrintF("\n\n");
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
@@ -145,7 +147,12 @@
} else {
PrintF("--- Code ---\n");
}
- code->Disassemble(*function->debug_name()->ToCString());
+ if (info->IsStub()) {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ code->Disassemble(CodeStub::MajorName(major_key, false));
+ } else {
+ code->Disassemble(*function->debug_name()->ToCString());
+ }
}
#endif // ENABLE_DISASSEMBLER
}
diff --git a/src/codegen.h b/src/codegen.h
index 3d14502..0ac68c2 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -104,6 +104,19 @@
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
+
+class SeqStringSetCharGenerator : public AllStatic {
+ public:
+ static void Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
+};
+
+
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
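
The new SeqStringSetCharGenerator above emits code that writes a single character into a sequential string. As a rough mental model (plain C++, not what the generator actually emits), the encoding simply selects between an 8-bit and a 16-bit store into the string's flat character array; the Encoding names below are illustrative stand-ins for String::Encoding.

#include <cstddef>
#include <cstdint>

// Stand-in for String::Encoding; enumerator names are illustrative.
enum class Encoding { kOneByte, kTwoByte };

// What the generated code boils down to: index into the flat character array
// with the width implied by the encoding, then store the value.
inline void SeqStringSetChar(Encoding encoding, void* chars,
                             size_t index, uint16_t value) {
  if (encoding == Encoding::kOneByte) {
    static_cast<uint8_t*>(chars)[index] = static_cast<uint8_t>(value);
  } else {
    static_cast<uint16_t*>(chars)[index] = value;
  }
}
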
diff --git a/src/compiler.cc b/src/compiler.cc
index 5779aae..ceac829 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -55,7 +55,7 @@
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ Initialize(script->GetIsolate(), BASE, zone);
}
@@ -65,7 +65,7 @@
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -76,12 +76,22 @@
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ Initialize(script_->GetIsolate(), BASE, zone);
}
-void CompilationInfo::Initialize(Zone* zone) {
- isolate_ = script_->GetIsolate();
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
+ Isolate* isolate, Zone* zone)
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
+ osr_ast_id_(BailoutId::None()) {
+ Initialize(isolate, STUB, zone);
+ code_stub_ = stub;
+}
+
+
+void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+ isolate_ = isolate;
function_ = NULL;
scope_ = NULL;
global_scope_ = NULL;
@@ -89,8 +99,13 @@
pre_parse_data_ = NULL;
zone_ = zone;
deferred_handles_ = NULL;
+ code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
- mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
+ if (mode == STUB) {
+ mode_ = STUB;
+ return;
+ }
+ mode_ = V8::UseCrankshaft() ? mode : NONOPT;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
@@ -107,6 +122,33 @@
}
+int CompilationInfo::num_parameters() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_parameters();
+ }
+}
+
+
+int CompilationInfo::num_heap_slots() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_heap_slots();
+ }
+}
+
+
+Code::Flags CompilationInfo::flags() const {
+ if (IsStub()) {
+ return Code::ComputeFlags(Code::COMPILED_STUB);
+ } else {
+ return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ }
+}
+
+
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@@ -317,13 +359,13 @@
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info()->function());
+ HTracer::Instance()->TraceCompilation(info());
}
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
- graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
+ graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@@ -376,7 +418,7 @@
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
ASSERT(graph_ != NULL);
- Handle<Code> optimized_code = chunk_->Codegen();
+ Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
return AbortOptimization();
diff --git a/src/compiler.h b/src/compiler.h
index 653d5f1..6d374d9 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -38,6 +38,7 @@
static const int kPrologueOffsetNotSet = -1;
class ScriptDataImpl;
+class HydrogenCodeStub;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
@@ -46,6 +47,7 @@
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+ CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
@@ -72,10 +74,14 @@
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
+  HydrogenCodeStub* code_stub() { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
+ int num_parameters() const;
+ int num_heap_slots() const;
+ Code::Flags flags() const;
void MarkAsEval() {
ASSERT(!is_lazy());
@@ -98,9 +104,31 @@
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
+
bool is_native() const {
return IsNative::decode(flags_);
}
+
+ bool is_calling() const {
+ return is_deferred_calling() || is_non_deferred_calling();
+ }
+
+ void MarkAsDeferredCalling() {
+ flags_ |= IsDeferredCalling::encode(true);
+ }
+
+ bool is_deferred_calling() const {
+ return IsDeferredCalling::decode(flags_);
+ }
+
+ void MarkAsNonDeferredCalling() {
+ flags_ |= IsNonDeferredCalling::encode(true);
+ }
+
+ bool is_non_deferred_calling() const {
+ return IsNonDeferredCalling::decode(flags_);
+ }
+
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@@ -151,6 +179,7 @@
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
+ bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
@@ -209,10 +238,11 @@
enum Mode {
BASE,
OPTIMIZE,
- NONOPT
+ NONOPT,
+ STUB
};
- void Initialize(Zone* zone);
+ void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
@@ -238,6 +268,12 @@
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+  // If the compiled code contains calls that require building a frame
+  class IsCalling: public BitField<bool, 9, 1> {};
+  // If deferred code contains calls that require building a frame
+  class IsDeferredCalling: public BitField<bool, 10, 1> {};
+  // If non-deferred code contains calls that require building a frame
+  class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
unsigned flags_;
@@ -250,6 +286,8 @@
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
+ // For compiled stubs, the stub object
+ HydrogenCodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@@ -310,6 +348,10 @@
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ : CompilationInfo(stub, isolate, &zone_),
+ zone_(isolate),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
@@ -335,7 +377,7 @@
class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@@ -377,7 +419,7 @@
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
- HGraphBuilder* graph_builder_;
+ HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
diff --git a/src/counters.cc b/src/counters.cc
index e152c75..6d453d6 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -77,7 +77,7 @@
// Start the timer.
void HistogramTimer::Start() {
- if (histogram_.Enabled() || FLAG_log_timer_events) {
+ if (histogram_.Enabled() || FLAG_log_internal_timer_events) {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
@@ -91,7 +91,7 @@
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
histogram_.AddSample(milliseconds);
}
- if (FLAG_log_timer_events) {
+ if (FLAG_log_internal_timer_events) {
LOG(Isolate::Current(),
TimerEvent(histogram_.name_, start_time_, OS::Ticks()));
}
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 9d16211..357e4b8 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -410,17 +410,24 @@
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
- function->shared()->increment_deopt_count();
+ // For COMPILED_STUBs called from builtins, the function pointer
+ // is a SMI indicating an internal frame.
+ if (function->IsSmi()) {
+ function = NULL;
+ }
+ if (function != NULL && function->IsOptimized()) {
+ function->shared()->increment_deopt_count();
+ }
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
- optimized_code_ = function_->code();
+ compiled_code_ = function_->code();
if (FLAG_trace_deopt && FLAG_code_comments) {
// Print instruction associated with this bailout.
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+ for (RelocIterator it(compiled_code_, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->rmode() == RelocInfo::COMMENT) {
last_comment = reinterpret_cast<const char*>(info->data());
@@ -436,18 +443,22 @@
}
}
} else if (type == LAZY) {
- optimized_code_ = FindDeoptimizingCodeFromAddress(from);
- ASSERT(optimized_code_ != NULL);
+ compiled_code_ = FindDeoptimizingCodeFromAddress(from);
+ if (compiled_code_ == NULL) {
+ compiled_code_ =
+ static_cast<Code*>(isolate->heap()->FindCodeObject(from));
+ }
+ ASSERT(compiled_code_ != NULL);
} else if (type == OSR) {
// The function has already been optimized and we're transitioning
// from the unoptimized shared version to the optimized one in the
// function. The return address (from) points to unoptimized code.
- optimized_code_ = function_->code();
- ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!optimized_code_->contains(from));
+ compiled_code_ = function_->code();
+ ASSERT(compiled_code_->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(!compiled_code_->contains(from));
} else if (type == DEBUGGER) {
- optimized_code_ = optimized_code;
- ASSERT(optimized_code_->contains(from));
+ compiled_code_ = optimized_code;
+ ASSERT(compiled_code_->contains(from));
}
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
@@ -535,7 +546,7 @@
shared->SourceCodePrint(&stream, -1);
PrintF("[source:\n%s\n]", *stream.ToCString());
- UNREACHABLE();
+ FATAL("unable to find pc offset during deoptimization");
return -1;
}
@@ -573,7 +584,7 @@
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -618,6 +629,9 @@
case Translation::SETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, true);
break;
+ case Translation::COMPILED_STUB_FRAME:
+ DoCompiledStubFrame(&iterator, i);
+ break;
case Translation::BEGIN:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
@@ -630,6 +644,7 @@
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
case Translation::DUPLICATE:
+ default:
UNREACHABLE();
break;
}
@@ -809,6 +824,7 @@
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -1117,6 +1133,7 @@
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@@ -1337,8 +1354,9 @@
// environment at the OSR entry. The code for that is built into
// the DoComputeOsrOutputFrame function for now.
} else {
- unsigned stack_slots = optimized_code_->stack_slots();
- unsigned outgoing_size = ComputeOutgoingArgumentSize();
+ unsigned stack_slots = compiled_code_->stack_slots();
+ unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
+ ? 0 : ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
#endif
@@ -1357,6 +1375,10 @@
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments are the values for formal parameters and
// the receiver. Every slot contains a pointer.
+ if (function->IsSmi()) {
+ ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+ return 0;
+ }
unsigned arguments = function->shared()->formal_parameter_count() + 1;
return arguments * kPointerSize;
}
@@ -1364,7 +1386,7 @@
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
return height * kPointerSize;
}
@@ -1372,7 +1394,7 @@
Object* Deoptimizer::ComputeLiteral(int index) const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
FixedArray* literals = data->LiteralArray();
return literals->get(index);
}
@@ -1403,8 +1425,6 @@
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
- ASSERT(!Serializer::enabled());
-
ASSERT(type == EAGER || type == LAZY);
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
int entry_count = (type == EAGER)
@@ -1419,7 +1439,6 @@
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
VirtualMemory* memory = type == EAGER
? data->eager_deoptimization_entry_code_
@@ -1681,6 +1700,11 @@
}
+void Translation::BeginCompiledStubFrame() {
+ buffer_->Add(COMPILED_STUB_FRAME, zone());
+}
+
+
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@@ -1762,6 +1786,7 @@
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
+ case COMPILED_STUB_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
@@ -1792,6 +1817,8 @@
return "GETTER_STUB_FRAME";
case SETTER_STUB_FRAME:
return "SETTER_STUB_FRAME";
+ case COMPILED_STUB_FRAME:
+ return "COMPILED_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
@@ -1899,6 +1926,10 @@
int literal_index = iterator->Next();
return SlotRef(data->LiteralArray()->get(literal_index));
}
+
+ case Translation::COMPILED_STUB_FRAME:
+ UNREACHABLE();
+ break;
}
UNREACHABLE();
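
Several of the deoptimizer hunks above rest on one convention: a frame built for a compiled stub stores a Smi marker (Smi::FromInt(StackFrame::STUB)) in the slot where an optimized JavaScript frame keeps its JSFunction pointer, so the deoptimizer must test for a Smi before touching shared-function data or sizing the incoming arguments. The sketch below illustrates that tagging trick with a simplified low-bit scheme (low bit 0 = small integer, low bit 1 = heap pointer); the helpers and the marker value are placeholders, not V8's actual constants.

#include <cstdint>
#include <cstdio>

// Simplified pointer tagging: Smis carry the integer in the upper bits with a
// 0 tag bit; heap object pointers have their low bit set to 1.
inline bool IsSmi(intptr_t slot) { return (slot & 1) == 0; }
inline intptr_t SmiFromInt(int value) { return static_cast<intptr_t>(value) << 1; }
inline int SmiToInt(intptr_t slot) { return static_cast<int>(slot >> 1); }

const int kStubFrameMarker = 5;  // placeholder for StackFrame::STUB

void DescribeFunctionSlot(intptr_t function_slot) {
  if (IsSmi(function_slot)) {
    // A stub frame: no JSFunction, so skip deopt counters and argument sizing.
    std::printf("stub frame marker %d, 0 incoming arguments\n",
                SmiToInt(function_slot));
  } else {
    std::printf("JSFunction* at %p\n",
                reinterpret_cast<void*>(function_slot));
  }
}

int main() {
  DescribeFunctionSlot(SmiFromInt(kStubFrameMarker));
}
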
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 89955b3..dbcdf61 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -135,6 +135,8 @@
int output_count() const { return output_count_; }
+ Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
+
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -297,6 +299,9 @@
static size_t GetMaxDeoptTableSize();
+ static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+ int max_entry_id);
+
private:
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
@@ -320,6 +325,8 @@
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
+ void DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -342,8 +349,6 @@
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
- static void EnsureCodeForDeoptimizationEntry(BailoutType type,
- int max_entry_id);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -360,7 +365,7 @@
Isolate* isolate_;
JSFunction* function_;
- Code* optimized_code_;
+ Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
@@ -530,7 +535,7 @@
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+ double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
@@ -600,6 +605,7 @@
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
+ COMPILED_STUB_FRAME,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@@ -630,6 +636,7 @@
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginCompiledStubFrame();
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 9f8b9a8..05d6b9b 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -287,7 +287,12 @@
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ } else {
+ out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
+ }
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
@@ -322,7 +327,8 @@
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
- int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+ int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::COMPILED_STUB)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
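
With the disassembler change above, an address that is not an eager deoptimization entry gets a second lookup against the lazy entries before falling back to the plain reloc-mode name. A minimal sketch of that two-step annotation, using ordinary maps in place of the real Deoptimizer tables:

#include <cstdio>
#include <map>

const int kNotDeoptimizationEntry = -1;

// Look up a code address in one deoptimization entry table.
int LookupId(const std::map<const void*, int>& table, const void* addr) {
  std::map<const void*, int>::const_iterator it = table.find(addr);
  return it == table.end() ? kNotDeoptimizationEntry : it->second;
}

void AnnotateTarget(const void* addr,
                    const std::map<const void*, int>& eager_entries,
                    const std::map<const void*, int>& lazy_entries,
                    const char* reloc_mode_name) {
  int id = LookupId(eager_entries, addr);
  if (id != kNotDeoptimizationEntry) {
    std::printf("    ;; deoptimization bailout %d\n", id);
    return;
  }
  id = LookupId(lazy_entries, addr);
  if (id != kNotDeoptimizationEntry) {
    std::printf("    ;; lazy deoptimization bailout %d\n", id);
  } else {
    std::printf("    ;; %s\n", reloc_mode_name);
  }
}
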
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 207fcee..acb0ccc 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -664,7 +664,10 @@
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
-DEFINE_bool(log_timer_events, false, "Log histogram timer events.")
+DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
+DEFINE_bool(log_timer_events, false,
+ "Time events including external callbacks.")
+DEFINE_implication(log_timer_events, log_internal_timer_events)
//
// Disassembler only flags
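
The new DEFINE_implication above means that passing --log-timer-events also switches on --log-internal-timer-events, so the internal events stay a subset of the full timer log. A small sketch of what that implication amounts to once flags are parsed; the flag names are taken from the diff, while the parsing plumbing is invented for illustration.

#include <cstdio>

struct Flags {
  bool log_timer_events = false;           // external callbacks + internal events
  bool log_internal_timer_events = false;  // internal events only
};

// Applied after command-line parsing: the broader flag implies the narrower one.
void ApplyImplications(Flags* flags) {
  if (flags->log_timer_events) flags->log_internal_timer_events = true;
}

int main() {
  Flags flags;
  flags.log_timer_events = true;  // e.g. parsed from --log-timer-events
  ApplyImplications(&flags);
  std::printf("internal events logged: %d\n", flags.log_internal_timer_events);
}
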
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 27a526c..4753932 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -235,11 +235,21 @@
}
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+inline CompiledFrame::CompiledFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) {
}
+inline StubFrame::StubFrame(StackFrameIterator* iterator)
+ : CompiledFrame(iterator) {
+}
+
+
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+ : CompiledFrame(iterator) {
+}
+
+
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
}
diff --git a/src/frames.cc b/src/frames.cc
index 3b60fb5..cb9ffba 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -617,7 +617,7 @@
}
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+void CompiledFrame::Iterate(ObjectVisitor* v) const {
#ifdef DEBUG
// Make sure that optimized frames do not contain any stack handlers.
StackHandlerIterator it(this, top_handler());
@@ -649,7 +649,7 @@
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
- parameters_base += DoubleRegister::kNumAllocatableRegisters *
+ parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
@@ -681,14 +681,24 @@
}
}
+ // Visit the return address in the callee and incoming arguments.
+ IteratePc(v, pc_address(), code);
+}
+
+
+void StubFrame::Iterate(ObjectVisitor* v) const {
+ CompiledFrame::Iterate(v);
+}
+
+
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+ CompiledFrame::Iterate(v);
+
// Visit the context and the function.
Object** fixed_base = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset);
Object** fixed_limit = &Memory::Object_at(fp());
v->VisitPointers(fixed_base, fixed_limit);
-
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
}
diff --git a/src/frames.h b/src/frames.h
index 30f7e1f..6a9570e 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -136,6 +136,7 @@
V(EXIT, ExitFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
+ V(STUB, StubFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -555,7 +556,33 @@
};
-class OptimizedFrame : public JavaScriptFrame {
+class CompiledFrame : public JavaScriptFrame {
+ public:
+ virtual Type type() const = 0;
+
+ // GC support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+ inline explicit CompiledFrame(StackFrameIterator* iterator);
+};
+
+
+class StubFrame : public CompiledFrame {
+ public:
+ virtual Type type() const { return STUB; }
+
+ // GC support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+ inline explicit StubFrame(StackFrameIterator* iterator);
+
+ friend class StackFrameIterator;
+};
+
+
+class OptimizedFrame : public CompiledFrame {
public:
virtual Type type() const { return OPTIMIZED; }
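
The frames.h/frames.cc hunks above split the old OptimizedFrame::Iterate into a shared CompiledFrame::Iterate (safepoint-driven stack-slot visiting plus the return address) and an OptimizedFrame override that additionally visits the context and function slots, with StubFrame reusing the base behaviour unchanged. The toy hierarchy below mirrors the shape of that refactoring; the visitor interface and slot names are placeholders, not V8's.

#include <cstdio>

struct ObjectVisitor {
  virtual ~ObjectVisitor() {}
  virtual void VisitSlot(const char* what) = 0;
};

class CompiledFrame {
 public:
  virtual ~CompiledFrame() {}
  // Work shared by stub and optimized frames: stack slots and return address.
  virtual void Iterate(ObjectVisitor* v) const {
    v->VisitSlot("spilled stack slots");
    v->VisitSlot("return address");
  }
};

class StubFrame : public CompiledFrame {
 public:
  // Nothing extra to visit; a stub frame has no context or JSFunction slot.
  virtual void Iterate(ObjectVisitor* v) const { CompiledFrame::Iterate(v); }
};

class OptimizedFrame : public CompiledFrame {
 public:
  virtual void Iterate(ObjectVisitor* v) const {
    CompiledFrame::Iterate(v);
    v->VisitSlot("context");
    v->VisitSlot("function");
  }
};

struct PrintingVisitor : ObjectVisitor {
  virtual void VisitSlot(const char* what) { std::printf("visit %s\n", what); }
};

int main() {
  PrintingVisitor v;
  OptimizedFrame().Iterate(&v);
}
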
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 01d9bd0..e136170 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -398,6 +398,7 @@
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
+ InitializeAstVisitor();
}
diff --git a/src/full-codegen.h b/src/full-codegen.h
index cfa7da3..364f0c3 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -48,7 +48,9 @@
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- BreakableStatementChecker() : is_breakable_(false) {}
+ BreakableStatementChecker() : is_breakable_(false) {
+ InitializeAstVisitor();
+ }
void Check(Statement* stmt);
void Check(Expression* stmt);
@@ -63,6 +65,7 @@
bool is_breakable_;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
};
@@ -824,6 +827,7 @@
friend class NestedStatement;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
};
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 301b099..45a93f9 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -65,23 +65,29 @@
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
- control);
+ control,
+ resolver);
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
- control);
+ control,
+ resolver);
}
@@ -122,16 +128,18 @@
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- HeapSnapshotGenerator generator(result, control);
+ HeapSnapshotGenerator generator(result, control, resolver);
generation_completed = generator.GenerateSnapshot();
break;
}
@@ -147,10 +155,13 @@
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control) {
- return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
+ return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
+ resolver);
}
void HeapProfiler::StartHeapObjectsTrackingImpl() {
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 346177b..9d3ba6f 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -51,12 +51,16 @@
static size_t GetMemorySizeUsedByProfiler();
- static HeapSnapshot* TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control);
- static HeapSnapshot* TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control);
+ static HeapSnapshot* TakeSnapshot(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ static HeapSnapshot* TakeSnapshot(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
@@ -81,12 +85,16 @@
private:
HeapProfiler();
~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control);
- HeapSnapshot* TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control);
+ HeapSnapshot* TakeSnapshotImpl(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ HeapSnapshot* TakeSnapshotImpl(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
void ResetSnapshots();
void StartHeapObjectsTrackingImpl();
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 47a9da3..3006734 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -155,6 +155,7 @@
V(Return) \
V(Ror) \
V(Sar) \
+ V(SeqStringSetChar) \
V(Shl) \
V(Shr) \
V(Simulate) \
@@ -5212,6 +5213,33 @@
};
+class HSeqStringSetChar: public HTemplateInstruction<3> {
+ public:
+ HSeqStringSetChar(String::Encoding encoding,
+ HValue* string,
+ HValue* index,
+ HValue* value) : encoding_(encoding) {
+ SetOperandAt(0, string);
+ SetOperandAt(1, index);
+ SetOperandAt(2, value);
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ HValue* string() { return OperandAt(0); }
+ HValue* index() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class HDeleteProperty: public HBinaryOperation {
public:
HDeleteProperty(HValue* context, HValue* obj, HValue* key)
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 1590ab3..6291dcb 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -138,6 +138,7 @@
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
+ ast_id == BailoutId::StubEntry() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
@@ -621,33 +622,204 @@
}
-HGraphBuilder::HGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
- : function_state_(NULL),
+HGraph* HGraphBuilder::CreateGraph() {
+ graph_ = new(zone()) HGraph(info_);
+ if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
+ HPhase phase("H_Block building");
+ set_current_block(graph()->entry_block());
+ if (!BuildGraph()) return NULL;
+ return graph_;
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+ ASSERT(current_block() != NULL);
+ current_block()->AddInstruction(instr);
+ return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+ RemovableSimulate removable) {
+ ASSERT(current_block() != NULL);
+ current_block()->AddSimulate(id, removable);
+}
+
+
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ val = AddInstruction(new(zone) HClampToUint8(val));
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ break;
+ }
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ break;
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ return new(zone) HStoreKeyed(external_elements, checked_key,
+ val, elements_kind);
+ } else {
+ ASSERT(val == NULL);
+ HLoadKeyed* load =
+ new(zone) HLoadKeyed(
+ external_elements, checked_key, dependency, elements_kind);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
+ }
+}
+
+
+HInstruction* HGraphBuilder::BuildFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* load_dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone) HCheckSmi(val));
+ // Fall through.
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+ // It's an element load (!is_store).
+ return new(zone) HLoadKeyed(elements,
+ checked_key,
+ load_dependency,
+ elements_kind);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+ // on a HElementsTransition instruction. The flag can also be removed if the
+ // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+ // ElementsKind transitions. Finally, the dependency can be removed for stores
+ // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
+ // generated store code.
+ if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+ (elements_kind == FAST_ELEMENTS && is_store)) {
+ if (mapcheck != NULL) {
+ mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ }
+ bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+ bool fast_elements = IsFastObjectElementsKind(elements_kind);
+ HInstruction* elements =
+ AddInstruction(new(zone) HLoadElements(object, mapcheck));
+ if (is_store && (fast_elements || fast_smi_only_elements)) {
+ HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+ elements, Isolate::Current()->factory()->fixed_array_map(), zone);
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ AddInstruction(check_cow_map);
+ }
+ HInstruction* length = NULL;
+ HInstruction* checked_key = NULL;
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
+ HLoadExternalArrayPointer* external_elements =
+ new(zone) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ return BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, mapcheck,
+ elements_kind, is_store);
+ }
+ ASSERT(fast_smi_only_elements ||
+ fast_elements ||
+ IsFastDoubleElementsKind(elements_kind));
+ if (is_js_array) {
+ length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
+ HType::Smi()));
+ } else {
+ length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ }
+ checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
+ return BuildFastElementAccess(elements, checked_key, val, mapcheck,
+ elements_kind, is_store);
+}
+
+
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+ TypeFeedbackOracle* oracle)
+ : HGraphBuilder(info),
+ function_state_(NULL),
initial_function_state_(this, info, oracle, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
- graph_(NULL),
- current_block_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- zone_(info->zone()),
inline_bailout_(false) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
+ InitializeAstVisitor();
}
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id) {
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
return first;
} else {
- HBasicBlock* join_block = graph_->CreateBasicBlock();
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
first->Goto(join_block);
second->Goto(join_block);
join_block->SetJoinId(join_id);
@@ -656,9 +828,9 @@
}
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+ HBasicBlock* exit_block,
+ HBasicBlock* continue_block) {
if (continue_block != NULL) {
if (exit_block != NULL) exit_block->Goto(continue_block);
continue_block->SetJoinId(statement->ContinueId());
@@ -668,11 +840,11 @@
}
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+ HBasicBlock* loop_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* loop_successor,
+ HBasicBlock* break_block) {
if (body_exit != NULL) body_exit->Goto(loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
@@ -703,8 +875,13 @@
is_recursive_(false),
use_optimistic_licm_(false),
type_change_checksum_(0) {
- start_environment_ =
- new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ if (info->IsStub()) {
+ start_environment_ =
+ new(zone_) HEnvironment(zone_);
+ } else {
+ start_environment_ =
+ new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ }
start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
@@ -2893,7 +3070,7 @@
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind)
@@ -2942,7 +3119,7 @@
// Implementation of utility classes to represent an expression's context in
// the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
: owner_(owner),
kind_(kind),
outer_(owner->ast_context()),
@@ -3053,7 +3230,7 @@
void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
@@ -3084,7 +3261,7 @@
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout("arguments object value in a test context");
}
@@ -3101,7 +3278,7 @@
}
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
#define CHECK_BAILOUT(call) \
do { \
call; \
@@ -3116,25 +3293,26 @@
} while (false)
-void HGraphBuilder::Bailout(const char* reason) {
+void HOptimizedGraphBuilder::Bailout(const char* reason) {
info()->set_bailout_reason(reason);
SetStackOverflow();
}
-void HGraphBuilder::VisitForEffect(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
EffectContext for_effect(this);
Visit(expr);
}
-void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+ ArgumentsAllowedFlag flag) {
ValueContext for_value(this, flag);
Visit(expr);
}
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
for_value.set_for_typeof(true);
Visit(expr);
@@ -3142,113 +3320,108 @@
-void HGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+ HBasicBlock* true_block,
+ HBasicBlock* false_block) {
TestContext for_test(this, expr, oracle(), true_block, false_block);
Visit(expr);
}
-void HGraphBuilder::VisitArgument(Expression* expr) {
+void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
Push(AddInstruction(new(zone()) HPushArgument(Pop())));
}
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+void HOptimizedGraphBuilder::VisitArgumentList(
+ ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
CHECK_ALIVE(VisitArgument(arguments->at(i)));
}
}
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+void HOptimizedGraphBuilder::VisitExpressions(
+ ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
CHECK_ALIVE(VisitForValue(exprs->at(i)));
}
}
-HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info());
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
+bool HOptimizedGraphBuilder::BuildGraph() {
+ Scope* scope = info()->scope();
+ if (scope->HasIllegalRedeclaration()) {
+ Bailout("function with illegal redeclaration");
+ return false;
+ }
+ if (scope->calls_eval()) {
+ Bailout("function calls eval");
+ return false;
+ }
+ SetUpScope(scope);
- {
- HPhase phase("H_Block building");
- current_block_ = graph()->entry_block();
+ // Add an edge to the body entry. This is warty: the graph's start
+ // environment will be used by the Lithium translation as the initial
+ // environment on graph entry, but it has now been mutated by the
+ // Hydrogen translation of the instructions in the start block. This
+ // environment uses values which have not been defined yet. These
+ // Hydrogen instructions will then be replayed by the Lithium
+ // translation, so they cannot have an environment effect. The edge to
+ // the body's entry block (along with some special logic for the start
+ // block in HInstruction::InsertAfter) seals the start block from
+ // getting unwanted instructions inserted.
+ //
+ // TODO(kmillikin): Fix this. Stop mutating the initial environment.
+ // Make the Hydrogen instructions in the initial block into Hydrogen
+ // values (but not instructions), present in the initial environment and
+ // not replayed by the Lithium translation.
+ HEnvironment* initial_env = environment()->CopyWithoutHistory();
+ HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+ current_block()->Goto(body_entry);
+ body_entry->SetJoinId(BailoutId::FunctionEntry());
+ set_current_block(body_entry);
- Scope* scope = info()->scope();
- if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
- return NULL;
- }
- if (scope->calls_eval()) {
- Bailout("function calls eval");
- return NULL;
- }
- SetUpScope(scope);
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ VisitVariableDeclaration(scope->function());
+ }
+ VisitDeclarations(scope->declarations());
+ AddSimulate(BailoutId::Declarations());
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
- body_entry->SetJoinId(BailoutId::FunctionEntry());
- set_current_block(body_entry);
+ HValue* context = environment()->LookupContext();
+ AddInstruction(
+ new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
- VisitDeclarations(scope->declarations());
- AddSimulate(BailoutId::Declarations());
+ VisitStatements(info()->function()->body());
+ if (HasStackOverflow()) return false;
- HValue* context = environment()->LookupContext();
- AddInstruction(
- new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
-
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return NULL;
-
- if (current_block() != NULL) {
- HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
- current_block()->FinishExit(instr);
- set_current_block(NULL);
- }
-
- // If the checksum of the number of type info changes is the same as the
- // last time this function was compiled, then this recompile is likely not
- // due to missing/inadequate type feedback, but rather too aggressive
- // optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
- int checksum = type_info->own_type_change_checksum();
- int composite_checksum = graph()->update_type_change_checksum(checksum);
- graph()->set_use_optimistic_licm(
- !type_info->matches_inlined_type_change_checksum(composite_checksum));
- type_info->set_inlined_type_change_checksum(composite_checksum);
+ if (current_block() != NULL) {
+ HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+ current_block()->FinishExit(instr);
+ set_current_block(NULL);
}
- return graph();
+ // If the checksum of the number of type info changes is the same as the
+ // last time this function was compiled, then this recompile is likely not
+ // due to missing/inadequate type feedback, but rather too aggressive
+ // optimization. Disable optimistic LICM in that case.
+ Handle<Code> unoptimized_code(info()->shared_info()->code());
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<TypeFeedbackInfo> type_info(
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+ int checksum = type_info->own_type_change_checksum();
+ int composite_checksum = graph()->update_type_change_checksum(checksum);
+ graph()->set_use_optimistic_licm(
+ !type_info->matches_inlined_type_change_checksum(composite_checksum));
+ type_info->set_inlined_type_change_checksum(composite_checksum);
+
+ return true;
}
+
bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
*bailout_reason = SmartArrayPointer<char>();
OrderBlocks();
@@ -3772,33 +3945,20 @@
}
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
- return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
- ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id, removable);
-}
-
-
-void HGraphBuilder::AddPhi(HPhi* instr) {
+void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
ASSERT(current_block() != NULL);
current_block()->AddPhi(instr);
}
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
Push(instr);
AddInstruction(instr);
}
template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
@@ -3812,11 +3972,11 @@
}
-void HGraphBuilder::SetUpScope(Scope* scope) {
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
HConstant* undefined_constant = new(zone()) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
- graph_->set_undefined_constant(undefined_constant);
+ graph()->set_undefined_constant(undefined_constant);
HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
@@ -3855,21 +4015,21 @@
}
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
CHECK_ALIVE(Visit(statements->at(i)));
}
}
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+HBasicBlock* HOptimizedGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
return b;
}
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
HBasicBlock* header = graph()->CreateBasicBlock();
HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
header->SetInitialEnvironment(entry_env);
@@ -3878,7 +4038,7 @@
}
-void HGraphBuilder::VisitBlock(Block* stmt) {
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3898,7 +4058,8 @@
}
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3906,14 +4067,14 @@
}
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3952,7 +4113,7 @@
}
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
BreakableStatement* stmt,
BreakType type,
int* drop_extra) {
@@ -3991,7 +4152,8 @@
}
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+void HOptimizedGraphBuilder::VisitContinueStatement(
+ ContinueStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4005,7 +4167,7 @@
}
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4019,7 +4181,7 @@
}
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4091,7 +4253,7 @@
}
-void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4099,7 +4261,7 @@
}
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4285,12 +4447,12 @@
}
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
return statement->OsrEntryId() == info()->osr_ast_id();
}
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
if (!HasOsrEntryAt(statement)) return false;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
@@ -4340,9 +4502,9 @@
}
-void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info) {
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
AddSimulate(stmt->StackCheckId());
HValue* context = environment()->LookupContext();
@@ -4355,7 +4517,7 @@
}
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4398,7 +4560,7 @@
}
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4442,7 +4604,7 @@
}
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4494,7 +4656,7 @@
}
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4615,7 +4777,7 @@
}
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4623,7 +4785,8 @@
}
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4631,7 +4794,7 @@
}
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4659,7 +4822,7 @@
}
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4678,7 +4841,7 @@
}
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -4687,7 +4850,7 @@
}
-void HGraphBuilder::VisitConditional(Conditional* expr) {
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4725,8 +4888,9 @@
}
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+HOptimizedGraphBuilder::GlobalPropertyAccess
+ HOptimizedGraphBuilder::LookupGlobalProperty(
+ Variable* var, LookupResult* lookup, bool is_store) {
if (var->is_this() || !info()->has_global_object()) {
return kUseGeneric;
}
@@ -4742,7 +4906,7 @@
}
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->LookupContext();
int length = info()->scope()->ContextChainLength(var->scope());
@@ -4755,7 +4919,7 @@
}
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4828,7 +4992,7 @@
}
-void HGraphBuilder::VisitLiteral(Literal* expr) {
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4838,7 +5002,7 @@
}
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4997,7 +5161,7 @@
}
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5102,7 +5266,7 @@
}
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5236,18 +5400,19 @@
}
-void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map) {
+void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+ Handle<Map> map) {
AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map,
+ LookupResult* lookup) {
ASSERT(lookup->IsFound());
// If the property does not exist yet, we have to check that it wasn't made
// readonly or turned into a setter by some meanwhile modifications on the
@@ -5299,9 +5464,10 @@
}
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ HValue* value) {
HValue* context = environment()->LookupContext();
return new(zone()) HStoreNamedGeneric(
context,
@@ -5312,11 +5478,12 @@
}
-HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
+ HValue* object,
+ HValue* value,
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
AddInstruction(new(zone()) HPushArgument(object));
AddInstruction(new(zone()) HPushArgument(value));
@@ -5324,10 +5491,11 @@
}
-HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map) {
// Handle a store to a known field.
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
@@ -5340,10 +5508,11 @@
}
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+ Property* expr,
+ HValue* object,
+ SmallMapList* types,
+ Handle<String> name) {
int count = 0;
int previous_field_offset = 0;
bool previous_field_is_in_object = false;
@@ -5395,11 +5564,12 @@
}
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
+ Assignment* expr,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name) {
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
@@ -5471,7 +5641,7 @@
}
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
expr->RecordTypeFeedback(oracle(), zone());
@@ -5554,10 +5724,11 @@
// Because not every expression has a position and there is no common
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+ Variable* var,
+ HValue* value,
+ int position,
+ BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
@@ -5588,7 +5759,7 @@
}
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -5785,7 +5956,7 @@
}
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5912,7 +6083,7 @@
}
-void HGraphBuilder::VisitThrow(Throw* expr) {
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5933,9 +6104,10 @@
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* lookup) {
+HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
+ HValue* object,
+ Handle<Map> map,
+ LookupResult* lookup) {
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
@@ -5950,9 +6122,10 @@
}
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ Property* expr) {
if (expr->IsUninitialized() && !FLAG_always_opt) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
@@ -5962,20 +6135,22 @@
}
-HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
+ HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
AddInstruction(new(zone()) HPushArgument(object));
return new(zone()) HCallConstantFunction(getter, 1);
}
-HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map) {
// Handle a load from a known field.
ASSERT(!map->is_dictionary_map());
LookupResult lookup(isolate());
@@ -6009,174 +6184,34 @@
}
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+ HValue* key) {
HValue* context = environment()->LookupContext();
return new(zone()) HLoadKeyedGeneric(context, object, key);
}
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
HValue* val,
HValue* dependency,
- ElementsKind elements_kind,
+ Handle<Map> map,
bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = AddInstruction(new(zone()) HClampToUint8(val));
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return new(zone()) HStoreKeyed(external_elements,
- checked_key,
- val,
- elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load =
- new(zone()) HLoadKeyed(
- external_elements, checked_key, dependency, elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(val));
- // Fall through.
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone()) HStoreKeyed(
- elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- // It's an element load (!is_store).
- return new(zone()) HLoadKeyed(elements,
- checked_key,
- load_dependency,
- elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store) {
HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
zone(), dependency);
AddInstruction(mapcheck);
if (dependency) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
- return BuildUncheckedMonomorphicElementAccess(object, key, val,
- mapcheck, map, is_store);
+ return BuildUncheckedMonomorphicElementAccess(
+ object, key, val,
+ mapcheck, map->instance_type() == JS_ARRAY_TYPE,
+ map->elements_kind(), is_store);
}
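A minimal illustration of what the reworked signature allows (the surrounding names are assumptions for the sketch, not part of this hunk): a caller without a concrete map, such as a generic keyed-load code stub, can drive the shared helper by spelling out the element layout directly.

  HInstruction* access = BuildUncheckedMonomorphicElementAccess(
      object, key,
      NULL,            // val is NULL: this is a load, not a store
      mapcheck,
      false,           // is_js_array: assume a plain JSObject receiver
      FAST_ELEMENTS,   // elements_kind passed explicitly instead of via a map
      false);          // is_store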
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store) {
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
- (map->elements_kind() == FAST_ELEMENTS && is_store)) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- bool fast_smi_only_elements = map->has_fast_smi_elements();
- bool fast_elements = map->has_fast_object_elements();
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, mapcheck));
- if (is_store && (fast_elements || fast_smi_only_elements)) {
- HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map(), zone());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
- AddInstruction(check_cow_map);
- }
- HInstruction* length = NULL;
- HInstruction* checked_key = NULL;
- if (map->has_external_array_elements()) {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- HLoadExternalArrayPointer* external_elements =
- new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(
- external_elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
- }
- ASSERT(fast_smi_only_elements ||
- fast_elements ||
- map->has_fast_double_elements());
- if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
- HType::Smi()));
- } else {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- }
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- return BuildFastElementAccess(elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
-}
-
-
-HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* object,
HValue* key,
HValue* val,
@@ -6224,19 +6259,23 @@
HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
AddInstruction(check_maps);
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps, most_general_consolidated_map, false);
+ object, key, val, check_maps,
+ most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+ most_general_consolidated_map->elements_kind(),
+ false);
return instr;
}
-HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ BailoutId ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
*has_side_effects = false;
AddInstruction(new(zone()) HCheckNonSmi(object));
SmallMapList* maps = prop->GetReceiverTypes();
@@ -6421,8 +6460,8 @@
}
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
+ external_elements, checked_key, val,
+ elements_kind_branch, elements_kind, is_store));
}
*has_side_effects |= access->HasObservableSideEffects();
if (position != RelocInfo::kNoPosition) access->set_position(position);
@@ -6442,14 +6481,15 @@
}
-HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+ HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ BailoutId ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
if (expr->IsMonomorphic()) {
@@ -6479,9 +6519,10 @@
}
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+ HValue* object,
+ HValue* key,
+ HValue* value) {
HValue* context = environment()->LookupContext();
return new(zone()) HStoreKeyedGeneric(
context,
@@ -6492,7 +6533,7 @@
}
-void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6520,7 +6561,7 @@
}
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
if (!proxy->var()->IsStackAllocated()) return false;
@@ -6579,7 +6620,7 @@
}
-void HGraphBuilder::VisitProperty(Property* expr) {
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -6670,8 +6711,8 @@
}
-void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map) {
if (!holder.is_null()) {
AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
@@ -6679,9 +6720,10 @@
}
-void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckConstantFunction(
+ Handle<JSObject> holder,
+ HValue* receiver,
+ Handle<Map> receiver_map) {
// Constant functions have the nice property that the map will change if they
// are overwritten. Therefore it is enough to check the map of the holder and
// its prototypes.
@@ -6723,10 +6765,11 @@
}
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name) {
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
@@ -6828,9 +6871,9 @@
}
-void HGraphBuilder::TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* reason) {
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+ Handle<JSFunction> caller,
+ const char* reason) {
if (FLAG_trace_inlining) {
SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
@@ -6849,7 +6892,7 @@
static const int kNotInlinable = 1000000000;
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (!FLAG_use_inlining) return kNotInlinable;
// Precondition: call is monomorphic and we have found a target with the
@@ -6880,13 +6923,13 @@
}
-bool HGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind) {
+bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
+ Handle<JSFunction> target,
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7195,7 +7238,7 @@
}
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
// The function call we are inlining is a method call if the call
// is a property call.
CallKind call_kind = (expr->expression()->AsProperty() == NULL)
@@ -7212,8 +7255,8 @@
}
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
expr->arguments()->length(),
@@ -7224,8 +7267,8 @@
}
-bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Property* prop) {
return TryInline(CALL_AS_METHOD,
getter,
0,
@@ -7236,9 +7279,9 @@
}
-bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_METHOD,
setter,
1,
@@ -7249,7 +7292,8 @@
}
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
+ bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
@@ -7283,10 +7327,11 @@
}
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+ Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type) {
ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -7416,7 +7461,7 @@
}
-bool HGraphBuilder::TryCallApply(Call* expr) {
+bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
ASSERT(prop != NULL);
@@ -7544,7 +7589,7 @@
}
-void HGraphBuilder::VisitCall(Call* expr) {
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7774,7 +7819,7 @@
}
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7838,20 +7883,21 @@
// Support for generating inlined runtime functions.
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of
+// HOptimizedGraphBuilder.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HGraphBuilder::Generate##Name,
+ &HOptimizedGraphBuilder::Generate##Name,
-const HGraphBuilder::InlineFunctionGenerator
- HGraphBuilder::kInlineFunctionGenerators[] = {
+const HOptimizedGraphBuilder::InlineFunctionGenerator
+ HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
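For context, a hedged sketch of how this member-pointer table is consumed (the dispatching code is outside this hunk, and lookup_index is an assumed variable): VisitCallRuntime selects the generator for the intrinsic and invokes it through the member pointer.

  InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
  (this->*generator)(expr);  // expr is the CallRuntime* being visited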
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7889,7 +7935,7 @@
}
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7905,7 +7951,7 @@
}
}
-void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
Property* prop = expr->expression()->AsProperty();
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (prop != NULL) {
@@ -7940,13 +7986,13 @@
}
-void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
-void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
@@ -7955,22 +8001,22 @@
}
-void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstant1());
+ new(zone()) HMul(context, value, graph()->GetConstant1());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstantMinus1());
+ new(zone()) HMul(context, value, graph()->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
@@ -7983,7 +8029,7 @@
}
-void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
TypeInfo info = oracle()->UnaryType(expr);
@@ -7996,7 +8042,7 @@
}
-void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
@@ -8040,8 +8086,9 @@
}
-HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
- CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+ bool returns_original_input,
+ CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
TypeInfo info = oracle()->IncrementType(expr);
Representation rep = ToRepresentation(info);
@@ -8063,8 +8110,8 @@
// to simulate the expression stack after this instruction.
// Any later failures deopt to the load of the input or earlier.
HConstant* delta = (expr->op() == Token::INC)
- ? graph_->GetConstant1()
- : graph_->GetConstantMinus1();
+ ? graph()->GetConstant1()
+ : graph()->GetConstantMinus1();
HValue* context = environment()->LookupContext();
HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
// We can't insert a simulate here, because it would break deoptimization,
@@ -8077,7 +8124,7 @@
}
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8161,7 +8208,7 @@
if (prop->key()->IsPropertyName()) {
// Named property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
@@ -8222,7 +8269,7 @@
} else {
// Keyed property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
@@ -8262,9 +8309,10 @@
}
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index) {
+HStringCharCodeAt* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+ HValue* context,
+ HValue* string,
+ HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
HStringLength* length = new(zone()) HStringLength(string);
@@ -8292,10 +8340,10 @@
// directions that can be replaced by one rotate right instruction or not.
// Returns the operand and the shift amount for the rotate instruction in the
// former case.
-bool HGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
+bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
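A worked example of the pattern this matcher targets, purely illustrative: for a 32-bit value x and shift amount sa,

  // (x << sa) | (x >>> (32 - sa))  ==  rotate-right(x, 32 - sa)

so the shl/shr pair plus the bitwise-or can be folded into a single rotate instruction using the shr amount.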
@@ -8330,9 +8378,10 @@
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right) {
+HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+ BinaryOperation* expr,
+ HValue* left,
+ HValue* right) {
HValue* context = environment()->LookupContext();
TypeInfo left_info, right_info, result_info, combined_info;
oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
@@ -8425,7 +8474,7 @@
}
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8441,7 +8490,7 @@
}
-void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->left()));
// Visit the right subexpression in the same AST context as the entire
// expression.
@@ -8449,7 +8498,7 @@
}
-void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
bool is_logical_and = expr->op() == Token::AND;
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
@@ -8539,7 +8588,7 @@
}
-void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
HValue* right = Pop();
@@ -8550,7 +8599,7 @@
}
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
if (info.IsUninitialized()) return Representation::None();
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
@@ -8560,9 +8609,9 @@
}
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
- Handle<String> check) {
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+ HTypeof* typeof_expr,
+ Handle<String> check) {
// Note: The HTypeof itself is removed during canonicalization, if possible.
HValue* value = typeof_expr->value();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
@@ -8632,7 +8681,7 @@
}
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8785,9 +8834,9 @@
}
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
- NilValue nil) {
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+ HValue* value,
+ NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8799,7 +8848,7 @@
}
-HInstruction* HGraphBuilder::BuildThisFunction() {
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
if (function_state()->outer() != NULL) {
@@ -8812,7 +8861,7 @@
}
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8821,7 +8870,8 @@
}
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void HOptimizedGraphBuilder::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
ASSERT(globals_.is_empty());
AstVisitor::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
@@ -8839,7 +8889,8 @@
}
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
@@ -8876,7 +8927,8 @@
}
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
@@ -8914,49 +8966,52 @@
}
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleDeclaration(
+ ModuleDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+ ImportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+ ExportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
+void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
UNREACHABLE();
}
// Generators for inline runtime functions.
// Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8965,7 +9020,7 @@
}
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8977,7 +9032,7 @@
}
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8987,7 +9042,7 @@
}
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8997,7 +9052,7 @@
}
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9007,7 +9062,7 @@
}
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9017,7 +9072,7 @@
}
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9026,12 +9081,12 @@
}
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
return Bailout("inlined runtime function: IsNonNegativeSmi");
}
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9041,7 +9096,7 @@
}
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
return Bailout(
"inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
@@ -9049,7 +9104,7 @@
// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating graph for inlined function.
@@ -9065,7 +9120,7 @@
// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
@@ -9078,7 +9133,7 @@
}
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
@@ -9098,14 +9153,14 @@
// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form detected by IsClassOfTest is handled before we get here
// and does not cause a bailout.
return Bailout("inlined runtime function: ClassOf");
}
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9114,7 +9169,7 @@
}
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
@@ -9125,7 +9180,40 @@
}
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 3);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* value = Pop();
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
+ String::ONE_BYTE_ENCODING, string, index, value);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 3);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* value = Pop();
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HValue* context = environment()->LookupContext();
+ HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index);
+ AddInstruction(char_code);
+ HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
+ String::TWO_BYTE_ENCODING, string, index, value);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9168,7 +9256,7 @@
// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9181,7 +9269,7 @@
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
@@ -9193,7 +9281,7 @@
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9209,7 +9297,7 @@
// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9221,14 +9309,14 @@
}
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
// %_Log is ignored in optimized code.
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
AddInstruction(global_object);
@@ -9238,7 +9326,7 @@
// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9249,7 +9337,7 @@
// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9260,7 +9348,7 @@
// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9272,7 +9360,7 @@
// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9283,7 +9371,7 @@
// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9295,13 +9383,13 @@
// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
return Bailout("inlined runtime function: GetFromCache");
}
// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9313,7 +9401,7 @@
// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// 1 ~ The function to call is not itself an argument to the call.
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
@@ -9357,7 +9445,7 @@
// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9368,7 +9456,7 @@
}
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9380,7 +9468,7 @@
}
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9392,7 +9480,7 @@
}
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9404,7 +9492,7 @@
}
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9416,18 +9504,18 @@
}
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
return Bailout("inlined runtime function: MathSqrt");
}
// Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
return Bailout("inlined runtime function: IsRegExpEquivalent");
}
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9436,7 +9524,7 @@
}
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
return Bailout("inlined runtime function: FastAsciiArrayJoin");
}
@@ -9466,6 +9554,23 @@
}
+HEnvironment::HEnvironment(Zone* zone)
+ : values_(0, zone),
+ assigned_variables_(0, zone),
+ frame_type_(STUB),
+ parameter_count_(0),
+ specials_count_(0),
+ local_count_(0),
+ outer_(NULL),
+ entry_(NULL),
+ pop_count_(0),
+ push_count_(0),
+ ast_id_(BailoutId::None()),
+ zone_(zone) {
+ Initialize(0, 0, 0);
+}
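A one-line usage sketch of the new constructor (assuming a Zone* is in scope): it produces a STUB-frame environment with zero parameters, specials and locals, which is the starting point for building graphs for code stubs rather than full functions.

  HEnvironment initial_env(zone);  // frame_type() == STUB, no slots yet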
+
+
HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
: values_(0, zone),
assigned_variables_(0, zone),
@@ -9733,11 +9838,17 @@
}
-void HTracer::TraceCompilation(FunctionLiteral* function) {
+void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
- Handle<String> name = function->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
+ if (info->IsOptimizing()) {
+ Handle<String> name = info->function()->debug_name();
+ PrintStringProperty("name", *name->ToCString());
+ PrintStringProperty("method", *name->ToCString());
+ } else {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("method", "stub");
+ }
PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
}
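A hedged sketch of the assumed call-site change (not shown in this hunk): callers that previously passed info()->function() now hand the tracer the whole CompilationInfo, so stub compilations without a FunctionLiteral can be traced as well.

  if (FLAG_trace_hydrogen) {
    HTracer::Instance()->TraceCompilation(info);
  }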
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 98b05d1..0837bf9 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -429,7 +429,8 @@
JS_CONSTRUCT,
JS_GETTER,
JS_SETTER,
- ARGUMENTS_ADAPTOR
+ ARGUMENTS_ADAPTOR,
+ STUB
};
@@ -440,6 +441,8 @@
Handle<JSFunction> closure,
Zone* zone);
+ explicit HEnvironment(Zone* zone);
+
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
}
@@ -636,7 +639,7 @@
};
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
enum ArgumentsAllowedFlag {
ARGUMENTS_NOT_ALLOWED,
@@ -672,10 +675,10 @@
bool is_for_typeof() { return for_typeof_; }
protected:
- AstContext(HGraphBuilder* owner, Expression::Context kind);
+ AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
- HGraphBuilder* owner() const { return owner_; }
+ HOptimizedGraphBuilder* owner() const { return owner_; }
inline Zone* zone() const;
@@ -686,7 +689,7 @@
#endif
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
bool for_typeof_;
@@ -695,7 +698,7 @@
class EffectContext: public AstContext {
public:
- explicit EffectContext(HGraphBuilder* owner)
+ explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
@@ -708,7 +711,7 @@
class ValueContext: public AstContext {
public:
- explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+ ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
@@ -726,7 +729,7 @@
class TestContext: public AstContext {
public:
- TestContext(HGraphBuilder* owner,
+ TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
@@ -766,7 +769,7 @@
class FunctionState {
public:
- FunctionState(HGraphBuilder* owner,
+ FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
@@ -796,7 +799,7 @@
bool arguments_pushed() { return arguments_elements() != NULL; }
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
CompilationInfo* compilation_info_;
TypeFeedbackOracle* oracle_;
@@ -828,7 +831,65 @@
};
-class HGraphBuilder: public AstVisitor {
+class HGraphBuilder {
+ public:
+ explicit HGraphBuilder(CompilationInfo* info)
+ : info_(info), graph_(NULL), current_block_(NULL) {}
+ virtual ~HGraphBuilder() {}
+
+ HBasicBlock* current_block() const { return current_block_; }
+ void set_current_block(HBasicBlock* block) { current_block_ = block; }
+ HEnvironment* environment() const {
+ return current_block()->last_environment();
+ }
+ Zone* zone() const { return info_->zone(); }
+ HGraph* graph() { return graph_; }
+
+ HGraph* CreateGraph();
+
+ // Adding instructions.
+ HInstruction* AddInstruction(HInstruction* instr);
+ void AddSimulate(BailoutId id,
+ RemovableSimulate removable = FIXED_SIMULATE);
+
+ protected:
+ virtual bool BuildGraph() = 0;
+
+ // Building common constructs
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ private:
+ HGraphBuilder();
+ CompilationInfo* info_;
+ HGraph* graph_;
+ HBasicBlock* current_block_;
+};
+
+
+class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
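To make the split concrete, here is a minimal sketch of a graph builder that needs no AST, derived from the new base class. The class name and body are purely hypothetical; the diff only hints at this use through the KeyedLoadFastElementStub friend declaration further down.

  class HypotheticalStubGraphBuilder : public HGraphBuilder {
   public:
    explicit HypotheticalStubGraphBuilder(CompilationInfo* info)
        : HGraphBuilder(info) {}

   protected:
    // There is no AST to visit; a stub builder emits instructions directly,
    // e.g. via the shared BuildUncheckedMonomorphicElementAccess helper.
    virtual bool BuildGraph() { return true; }
  };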
@@ -864,7 +925,8 @@
// structures mirroring BreakableStatement nesting.
class BreakAndContinueScope BASE_EMBEDDED {
public:
- BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+ BreakAndContinueScope(BreakAndContinueInfo* info,
+ HOptimizedGraphBuilder* owner)
: info_(info), owner_(owner), next_(owner->break_scope()) {
owner->set_break_scope(this);
}
@@ -872,7 +934,7 @@
~BreakAndContinueScope() { owner_->set_break_scope(next_); }
BreakAndContinueInfo* info() { return info_; }
- HGraphBuilder* owner() { return owner_; }
+ HOptimizedGraphBuilder* owner() { return owner_; }
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
@@ -880,32 +942,20 @@
private:
BreakAndContinueInfo* info_;
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
BreakAndContinueScope* next_;
};
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+ HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
- HGraph* CreateGraph();
+ virtual bool BuildGraph();
// Simple accessors.
- HGraph* graph() const { return graph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
-
bool inline_bailout() { return inline_bailout_; }
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id,
- RemovableSimulate removable = FIXED_SIMULATE);
-
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
HValue* Pop() { return environment()->Pop(); }
@@ -928,9 +978,12 @@
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
// Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
+ typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
+ (CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
@@ -1139,25 +1192,14 @@
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object,
+ HValue* key);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps);
- HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store);
-
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@@ -1197,14 +1239,6 @@
Handle<String> name,
Property* expr,
Handle<Map> map);
- HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);
@@ -1246,8 +1280,6 @@
HValue** operand,
HValue** shift_amount);
- Zone* zone() const { return zone_; }
-
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@@ -1261,20 +1293,16 @@
// A stack of breakable statements entered.
BreakAndContinueScope* break_scope_;
- HGraph* graph_;
- HBasicBlock* current_block_;
-
int inlined_count_;
ZoneList<Handle<Object> > globals_;
- Zone* zone_;
-
bool inline_bailout_;
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
+ friend class KeyedLoadFastElementStub;
- DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+ DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
};
@@ -1447,7 +1475,7 @@
class HTracer: public Malloced {
public:
- void TraceCompilation(FunctionLiteral* function);
+ void TraceCompilation(CompilationInfo* info);
void TraceHydrogen(const char* name, HGraph* graph);
void TraceLithium(const char* name, LChunk* chunk);
void TraceLiveRanges(const char* name, LAllocator* allocator);
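For orientation: the hydrogen.h change above splits the old monolithic builder into a slim HGraphBuilder base (graph, current block, instruction helpers) and the AST-driven HOptimizedGraphBuilder. A minimal sketch of a stub-style builder on top of the new base; the class name and constructor signature are hypothetical:

class KeyedLoadStubGraphBuilder : public HGraphBuilder {
 public:
  // Assumes the base class exposes a protected HGraphBuilder(CompilationInfo*)
  // constructor, as suggested by the info_ member above.
  explicit KeyedLoadStubGraphBuilder(CompilationInfo* info)
      : HGraphBuilder(info) {}

 protected:
  // Only the graph construction has to be provided; the shared helpers
  // (AddInstruction, BuildFastElementAccess,
  // BuildUncheckedMonomorphicElementAccess) are inherited.
  virtual bool BuildGraph() {
    return true;  // Would emit the stub's hydrogen instructions here.
  }
};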
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 8cccaa5..f82defc 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -55,6 +55,33 @@
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+int IntelDoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumAllocatableRegisters;
+ } else {
+ return X87TopOfStackRegister::kNumAllocatableRegisters;
+ }
+}
+
+
+int IntelDoubleRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumRegisters;
+ } else {
+ return X87TopOfStackRegister::kNumRegisters;
+ }
+}
+
+
+const char* IntelDoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::AllocationIndexToString(index);
+ } else {
+ return X87TopOfStackRegister::AllocationIndexToString(index);
+ }
+}
+
+
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
@@ -2199,7 +2226,8 @@
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x18);
- XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M.
+ // Emit hint number in Reg position of RegR/M.
+ XMMRegister code = XMMRegister::from_code(level);
emit_sse_operand(code, src);
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index b1f421e..232a85e 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -65,7 +65,10 @@
// and best performance in optimized code.
//
struct Register {
- static const int kNumAllocatableRegisters = 6;
+ static const int kMaxNumAllocatableRegisters = 6;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 8;
static inline const char* AllocationIndexToString(int index);
@@ -119,7 +122,7 @@
inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
@@ -133,22 +136,69 @@
inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
-struct XMMRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
+struct IntelDoubleRegister {
+ static const int kMaxNumAllocatableRegisters = 7;
+ static int NumAllocatableRegisters();
+ static int NumRegisters();
+ static const char* AllocationIndexToString(int index);
- static int ToAllocationIndex(XMMRegister reg) {
+ static int ToAllocationIndex(IntelDoubleRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
+ static IntelDoubleRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ return from_code(index + 1);
+ }
+
+ static IntelDoubleRegister from_code(int code) {
+ IntelDoubleRegister result = { code };
+ return result;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < NumRegisters();
+ }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
+
+const IntelDoubleRegister double_register_0 = { 0 };
+const IntelDoubleRegister double_register_1 = { 1 };
+const IntelDoubleRegister double_register_2 = { 2 };
+const IntelDoubleRegister double_register_3 = { 3 };
+const IntelDoubleRegister double_register_4 = { 4 };
+const IntelDoubleRegister double_register_5 = { 5 };
+const IntelDoubleRegister double_register_6 = { 6 };
+const IntelDoubleRegister double_register_7 = { 7 };
+
+
+struct XMMRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 7;
+ static const int kNumRegisters = 8;
+
+ static XMMRegister from_code(int code) {
+ STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
+ XMMRegister result;
+ result.code_ = code;
+ return result;
+ }
+
+ bool is(XMMRegister reg) const { return code_ == reg.code_; }
+
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
return from_code(index + 1);
}
@@ -165,34 +215,46 @@
};
return names[index];
}
-
- static XMMRegister from_code(int code) {
- XMMRegister r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- int code_;
};
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
+#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
+#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
+#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
+#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
+#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
+#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
+#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
+#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-typedef XMMRegister DoubleRegister;
+struct X87TopOfStackRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 1;
+ static const int kNumRegisters = 1;
+
+ bool is(X87TopOfStackRegister reg) const {
+ return code_ == reg.code_;
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "st0",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(X87TopOfStackRegister reg) {
+ ASSERT(reg.code() == 0);
+ return 0;
+ }
+};
+
+#define x87tos \
+ static_cast<const X87TopOfStackRegister&>(double_register_0)
+
+
+typedef IntelDoubleRegister DoubleRegister;
enum Condition {
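The new IntelDoubleRegister base (with DoubleRegister now an alias for it) lets platform-independent code size the double register file at runtime instead of assuming SSE2. A minimal sketch; the helper name is hypothetical:

static void ListAllocatableDoubleRegisters() {
  // Yields the 7 allocatable XMM registers when SSE2 is available, or the
  // single x87 top-of-stack register otherwise.
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
    PrintF("%d: %s\n", i, DoubleRegister::AllocationIndexToString(i));
  }
}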
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 01785bb..cadff49 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -574,6 +574,25 @@
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across the notification; this is important for
+ // compiled stubs that tail-call the runtime on deopts, passing their
+ // parameters in registers.
+ __ pushad();
+ __ CallRuntime(Runtime::kNotifyICMiss, 0);
+ __ popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(esp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index da8e2ae..23dfc24 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -40,6 +40,18 @@
namespace v8 {
namespace internal {
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ isolate->builtins()->KeyedLoadIC_Miss();
+}
+
+
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
@@ -2426,6 +2438,7 @@
__ bind(&loaded);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
@@ -2498,6 +2511,7 @@
__ fstp(0);
__ ret(kPointerSize);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2510,6 +2524,7 @@
if (tagged) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
@@ -2524,6 +2539,7 @@
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
@@ -2556,6 +2572,7 @@
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
@@ -4808,10 +4825,17 @@
void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ // Stubs might already be in the snapshot; detect that and don't
+ // regenerate them, which would mess up the code stub initialization state.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ save_doubles_code = *(save_doubles.GetCode());
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
+ }
}
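The descriptor registered above is what ties a hydrogen stub to its IC miss handler; the deoptimizer consults it in DoCompiledStubFrame further down. A minimal sketch of a consumer; the function name is hypothetical:

static void DescribeStubInterface(Isolate* isolate, int major_key) {
  CodeStubInterfaceDescriptor* descriptor =
      isolate->code_stub_interface_descriptor(major_key);
  // For KeyedLoadFastElementStub this reports two register parameters
  // (edx: receiver, ecx: key); the deoptimization handler is KeyedLoadIC_Miss.
  PrintF("register params: %d\n", descriptor->register_param_count_);
  USE(descriptor->deoptimization_handler_);
}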
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 29c16e1..4f8c81f 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -38,7 +38,7 @@
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@@ -61,7 +61,7 @@
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -80,7 +80,7 @@
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -225,7 +225,7 @@
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -247,7 +247,7 @@
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -259,7 +259,7 @@
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -295,7 +295,7 @@
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -320,7 +320,7 @@
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -382,7 +382,7 @@
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -585,7 +585,7 @@
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 2f5553c..9477bf1 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -793,6 +793,50 @@
}
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi index");
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi value");
+
+ __ cmp(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ cmp(index, Immediate(Smi::FromInt(0)));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ __ SmiUntag(value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ SmiUntag(index);
+ __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ }
+}
+
+
static Operand ExpConstant(int index) {
return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 99ad522..1fbdbfd 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -307,7 +307,7 @@
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
@@ -344,7 +344,7 @@
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -455,7 +455,7 @@
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
@@ -569,6 +569,70 @@
}
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO <-ebp
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+<-esp
+ // | | saved frame (ebp) |
+ // | +=========================+<-ebp
+ // | | JSFunction context |
+ // v +-------------------------+
+ // | COMPILED_STUB marker | ebp = saved frame
+ // +-------------------------+ esi = JSFunction context
+ // | |
+ // | ... |
+ // | |
+ // +-------------------------+<-esp
+ //
+ //
+ int output_frame_size = 1 * kPointerSize;
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, 0);
+ Code* notify_miss =
+ isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetContinuation(
+ reinterpret_cast<uint32_t>(notify_miss->entry()));
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+ Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+ unsigned input_frame_size = input_->GetFrameSize();
+ intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(0, value);
+ value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+ output_frame->SetRegister(ebp.code(), value);
+ output_frame->SetFp(value);
+ value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+ output_frame->SetRegister(esi.code(), value);
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ ASSERT(opcode == Translation::REGISTER);
+ USE(opcode);
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(edx.code(), input_value);
+
+ int32_t next = iterator->Next();
+ opcode = static_cast<Translation::Opcode>(next);
+ ASSERT(opcode == Translation::REGISTER);
+ input_reg = iterator->Next();
+ input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(ecx.code(), input_value);
+
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+}
+
+
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@@ -997,7 +1061,7 @@
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -1012,7 +1076,6 @@
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
Isolate* isolate = masm()->isolate();
@@ -1022,10 +1085,13 @@
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movdbl(Operand(esp, offset), xmm_reg);
+ }
}
__ pushad();
@@ -1073,14 +1139,18 @@
__ pop(Operand(ebx, offset));
}
- // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ // Fill in the double input registers.
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize;
+ __ movdbl(xmm0, Operand(esp, src_offset));
+ __ movdbl(Operand(ebx, dst_offset), xmm0);
+ }
}
+ __ fninit();
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
@@ -1098,10 +1168,13 @@
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
@@ -1139,31 +1212,39 @@
}
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: ebx = current FrameDescription*, ecx = loop index.
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ }
}
}
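DoCompiledStubFrame above expects the deopt translation for a stub to carry exactly the stub's register parameters after the compiled-stub frame marker. A minimal sketch of that shape; the function name and frame counts are illustrative:

static void RecordStubDeoptTranslation(Zone* zone) {
  TranslationBuffer buffer(zone);
  Translation translation(&buffer, 1, 0, zone);  // Frame counts illustrative.
  translation.BeginCompiledStubFrame();
  translation.StoreRegister(edx);  // First stub parameter (e.g. receiver).
  translation.StoreRegister(ecx);  // Second stub parameter (e.g. key).
}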
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 6dfec92..fdafe39 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -3074,6 +3074,38 @@
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(ecx);
+ __ pop(ebx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(ecx);
+ __ pop(ebx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index de60451..ff1ec90 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "ia32/lithium-codegen-ia32.h"
+#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
@@ -70,7 +71,6 @@
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope(SSE2);
CodeStub::GenerateFPStubs();
@@ -79,13 +79,15 @@
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone();
+ dynamic_frame_alignment_ = info()->IsOptimizing() &&
+ ((chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone());
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateJumpTable() &&
GenerateSafepointTable();
}
@@ -95,7 +97,9 @@
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ if (!info()->IsStub()) {
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ }
}
@@ -126,113 +130,126 @@
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(Operand(esp, receiver_offset),
+ Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&ok);
+ }
+ if (dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
- if (dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
}
info()->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ if (info()->IsStub()) {
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ __ push(edi); // Callee's JS function.
+ }
+ }
- if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ if (info()->IsOptimizing() &&
+ dynamic_frame_alignment_ &&
+ FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
__ Assert(zero, "frame is expected to be aligned");
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- ASSERT_GE(slots, 1);
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ push(Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
- #ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
+ ASSERT(slots != 0 || !info()->IsOptimizing());
+ if (slots > 0) {
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
}
- #endif
- }
-
- // Store dynamic frame alignment state in the first local.
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- edx);
} else {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- Immediate(kNoAlignmentPadding));
+ if (FLAG_debug_code) {
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ push(Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ } else {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On Windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+#endif
+ }
+
+ // Store dynamic frame alignment state in the first local.
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ edx);
+ } else {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ Immediate(kNoAlignmentPadding));
+ }
}
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -272,7 +289,7 @@
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -326,16 +343,102 @@
}
+bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
+ for (int i = 0; i < jump_table_.length(); i++) {
+ __ bind(&jump_table_[i].label);
+ Address entry = jump_table_[i].address;
+ if (jump_table_[i].needs_frame) {
+ __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+ if (jump_table_[i].is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ jmp(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ push(esi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt; it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation, which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ jmp(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ push(esi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push the continuation, which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 2 * kPointerSize));
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ }
+ } else {
+ if (jump_table_[i].is_lazy_deopt) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
+ }
+ return !is_aborted();
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
__ jmp(code->exit());
}
}
@@ -349,6 +452,15 @@
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
+ if (!info()->IsStub()) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -364,6 +476,11 @@
}
+bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
+ return op->IsDoubleRegister();
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
@@ -449,7 +566,9 @@
translation,
arguments_index,
arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
@@ -472,6 +591,11 @@
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ default:
+ UNREACHABLE();
}
// Inlined frames which push their arguments cause the index to be
@@ -606,6 +730,8 @@
__ CallRuntime(fun, argc);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ ASSERT(info()->is_calling());
}
@@ -630,6 +756,8 @@
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+ ASSERT(info()->is_calling());
}
@@ -675,7 +803,11 @@
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = frame_is_built_
+ ? Deoptimizer::EAGER
+ : Deoptimizer::LAZY;
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -709,19 +841,44 @@
__ popfd();
}
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool lazy_deopt_needed = info()->IsStub();
if (cc == no_condition) {
if (FLAG_trap_on_deopt) __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ if (lazy_deopt_needed) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
+ Label done;
if (FLAG_trap_on_deopt) {
- Label done;
__ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
- } else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
}
+ if (!lazy_deopt_needed && frame_is_built_) {
+ if (FLAG_trap_on_deopt) {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (FLAG_trap_on_deopt) {
+ __ jmp(&jump_table_.last().label);
+ } else {
+ __ j(cc, &jump_table_.last().label);
+ }
+ }
+ __ bind(&done);
}
}
@@ -1422,7 +1579,8 @@
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
+ CpuFeatures::Scope scope1(SSE2);
+ CpuFeatures::Scope scope2(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(res, Operand(temp));
@@ -1434,6 +1592,7 @@
__ pinsrd(res, Operand(temp), 1);
}
} else {
+ CpuFeatures::Scope scope(SSE2);
__ Set(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
@@ -1550,6 +1709,15 @@
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
@@ -1587,6 +1755,7 @@
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ CpuFeatures::Scope scope(SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
@@ -1648,6 +1817,7 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
@@ -1658,8 +1828,8 @@
__ addsd(left, right);
break;
case Token::SUB:
- __ subsd(left, right);
- break;
+ __ subsd(left, right);
+ break;
case Token::MUL:
__ mulsd(left, right);
break;
@@ -1732,6 +1902,7 @@
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
+ CpuFeatures::Scope scope(SSE2);
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@@ -1891,6 +2062,7 @@
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ CpuFeatures::Scope scope(SSE2);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2400,7 +2572,7 @@
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
@@ -2414,8 +2586,10 @@
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- __ mov(esp, ebp);
- __ pop(ebp);
+ if (NeedsEagerFrame()) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
@@ -2428,7 +2602,12 @@
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&no_padding);
}
- __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+ if (info()->IsStub()) {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ Ret();
+ } else {
+ __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+ }
}
@@ -2804,11 +2983,23 @@
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else {
+ __ fld_s(operand);
+ HandleX87FPReturnValue(instr);
+ }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ movdbl(ToDoubleRegister(instr->result()), operand);
+ } else {
+ __ fld_d(operand);
+ HandleX87FPReturnValue(instr);
+ }
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@@ -2852,9 +3043,30 @@
}
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- XMMRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
+ if (IsX87TopOfStack(instr->result())) {
+ // Return value is already on the stack. If the value has no uses, then
+ // pop it off the FP stack. Otherwise, make sure that there are enough
+ // copies of the value on the stack to feed all of the usages, e.g.
+ // when the following instruction uses the return value in multiple
+ // inputs.
+ int count = instr->hydrogen_value()->UseCount();
+ if (count == 0) {
+ __ fstp(0);
+ } else {
+ count--;
+ ASSERT(count <= 7);
+ while (count-- > 0) {
+ __ fld(0);
+ }
+ }
+ } else {
+ __ fstp_d(ToOperand(instr->result()));
+ }
+}
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@@ -2875,7 +3087,14 @@
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- __ movdbl(result, double_load_operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movdbl(result, double_load_operand);
+ } else {
+ __ fld_d(double_load_operand);
+ HandleX87FPReturnValue(instr);
+ }
}
@@ -3291,6 +3510,7 @@
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
+ CpuFeatures::Scope scope(SSE2);
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3312,6 +3532,7 @@
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3376,6 +3597,7 @@
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3421,6 +3643,7 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
@@ -3428,6 +3651,7 @@
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
@@ -3504,6 +3728,7 @@
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
+ CpuFeatures::Scope scope(SSE2);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
@@ -3571,6 +3796,7 @@
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
@@ -3602,6 +3828,7 @@
void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
Register temp1 = ToRegister(instr->temp1());
@@ -3870,6 +4097,11 @@
}
DeoptimizeIf(below_equal, instr->environment());
} else {
+ if (instr->hydrogen()->index()->representation().IsTagged() &&
+ !instr->hydrogen()->index()->type().IsSmi()) {
+ __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
DeoptimizeIf(above_equal, instr->environment());
}
@@ -3892,9 +4124,11 @@
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ CpuFeatures::Scope scope(SSE2);
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
@@ -3930,6 +4164,7 @@
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
if (instr->NeedsCanonicalization()) {
@@ -4180,15 +4415,21 @@
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ } else {
+ UNREACHABLE();
+ }
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
LOperand* temp = instr->temp();
@@ -4266,9 +4507,21 @@
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ cvtsi2sd(xmm0, Operand(reg));
+ } else {
+ __ push(reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(reg);
+ }
} else {
- __ LoadUint32(xmm0, reg, xmm1);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ LoadUint32(xmm0, reg, xmm1);
+ } else {
+ UNREACHABLE();
+ }
}
if (FLAG_inline_new) {
@@ -4297,7 +4550,12 @@
// Done. Put the value in xmm0 into the value of the allocated heap
// number.
__ bind(&done);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
__ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4313,7 +4571,6 @@
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
@@ -4324,7 +4581,16 @@
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ } else {
+ if (!IsX87TopOfStack(instr->value())) {
+ __ fld_d(ToOperand(instr->value()));
+ }
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
}
@@ -4468,6 +4734,7 @@
__ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
@@ -4481,7 +4748,8 @@
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
}
- } else {
+ } else if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
// Deoptimize if we don't have a heap number.
__ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
@@ -4503,6 +4771,8 @@
__ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
+ } else {
+ UNREACHABLE();
}
__ bind(&done);
}
@@ -4545,19 +4815,24 @@
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToDoubleRegister(result);
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- deoptimize_on_minus_zero,
- instr->environment());
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ deoptimize_on_minus_zero,
+ instr->environment());
+ } else {
+ UNIMPLEMENTED();
+ }
}
@@ -4566,6 +4841,7 @@
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
@@ -4755,10 +5031,10 @@
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
- LEnvironment* env) {
+ LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
@@ -4776,12 +5052,13 @@
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampDoubleToUint8(value_reg, xmm0, result_reg);
@@ -4796,6 +5073,8 @@
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope scope(SSE2);
+
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
Label is_smi, done, heap_number;
@@ -4842,7 +5121,7 @@
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
@@ -4852,7 +5131,7 @@
// Check the holder map.
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
}
@@ -5389,13 +5668,15 @@
void LCodeGen::EnsureSpaceForLazyDeopt() {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- __ Nop(padding_size);
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ __ Nop(padding_size);
+ }
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 44ddaff..63d15f4 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -55,6 +55,7 @@
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@@ -64,6 +65,7 @@
dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -78,10 +80,20 @@
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsX87TopOfStack(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
Immediate ToInteger32Immediate(LOperand* op) const {
@@ -90,6 +102,9 @@
Handle<Object> ToHandle(LConstantOperand* op) const;
+ // A utility for instructions that return floating point values on X87.
+ void HandleX87FPReturnValue(LInstruction* instr);
+
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
@@ -122,7 +137,7 @@
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ CompareMapMode mode, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -172,7 +187,7 @@
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -184,9 +199,7 @@
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
- // Pad the reloc info to ensure that we have enough space to patch during
- // deoptimization.
- bool GenerateRelocPadding();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
@@ -356,10 +369,23 @@
MacroAssembler* const masm_;
CompilationInfo* const info_;
+ struct JumpTableEntry {
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+ : label(),
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
+ Label label;
+ Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
+ };
+
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
@@ -369,6 +395,7 @@
bool dynamic_frame_alignment_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -386,6 +413,7 @@
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ ASSERT(codegen_->info()->is_calling());
}
~PushSafepointRegistersScope() {
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 6428916..7cb15e6 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -191,7 +191,7 @@
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
return Register::FromAllocationIndex(i);
}
@@ -204,7 +204,7 @@
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] != 0) return false;
if (destination_uses_[i] != 0) return false;
}
@@ -256,7 +256,7 @@
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
Register scratch = Register::FromAllocationIndex(i);
__ push(scratch);
@@ -324,29 +324,38 @@
}
} else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(dst, src);
+ }
} else {
- ASSERT(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ UNREACHABLE();
}
} else if (source->IsDoubleStackSlot()) {
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movdbl(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(xmm0, src);
+ __ movdbl(dst, xmm0);
+ }
} else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ UNREACHABLE();
}
-
} else {
UNREACHABLE();
}
@@ -410,6 +419,7 @@
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
diff --git a/src/ia32/lithium-gap-resolver-ia32.h b/src/ia32/lithium-gap-resolver-ia32.h
index 0c81d72..3a58f58 100644
--- a/src/ia32/lithium-gap-resolver-ia32.h
+++ b/src/ia32/lithium-gap-resolver-ia32.h
@@ -97,8 +97,8 @@
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumAllocatableRegisters];
- int destination_uses_[Register::kNumAllocatableRegisters];
+ int source_uses_[Register::kMaxNumAllocatableRegisters];
+ int destination_uses_[Register::kMaxNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 65a300e..fd9cc94 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -44,10 +44,10 @@
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -460,9 +460,11 @@
status_ = BUILDING;
// Reserve the first spill slot for the state of dynamic alignment.
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
- ASSERT_EQ(alignment_state_index, 0);
- USE(alignment_state_index);
+ if (info()->IsOptimizing()) {
+ int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+ }
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -494,6 +496,12 @@
}
+LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ X87TopOfStackRegister::ToAllocationIndex(reg));
+}
+
+
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
@@ -626,6 +634,13 @@
}
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineX87TOS(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, ToUnallocated(x87tos));
+}
+
+
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
@@ -638,6 +653,8 @@
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -1643,6 +1660,17 @@
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(ecx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), ecx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -1680,8 +1708,12 @@
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ // Only mark conversions that might need to allocate as calling rather than
+ // all changes, so that simple, non-allocating conversions do not force a
+ // stack frame to be built.
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = instr->deoptimize_on_minus_zero()
@@ -1706,7 +1738,10 @@
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = CpuFeatures::IsSupported(SSE2)
+ ? UseRegisterAtStart(instr->value())
+ : UseAtStart(instr->value());
LOperand* temp = TempRegister();
// Make sure that temp and result_temp are different registers.
@@ -1724,6 +1759,7 @@
DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -2240,8 +2276,17 @@
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
@@ -2342,6 +2387,7 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LStackCheck(context), instr);
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 2067c62..f4056c1 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -144,6 +144,7 @@
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -249,7 +250,11 @@
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ virtual bool ClobbersDoubleRegisters() const {
+ return is_call_ || !CpuFeatures::IsSupported(SSE2);
+ }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
@@ -355,6 +360,7 @@
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
@@ -1179,6 +1185,30 @@
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
@@ -1413,7 +1443,6 @@
inputs_[0] = elements;
inputs_[1] = key;
}
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
@@ -1423,11 +1452,18 @@
return hydrogen()->is_external();
}
+ virtual bool ClobbersDoubleRegisters() const {
+ return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ bool key_is_smi() {
+ return hydrogen()->key()->representation().IsTagged();
+ }
};
@@ -2408,8 +2444,9 @@
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2573,6 +2610,7 @@
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
+ LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2633,6 +2671,8 @@
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
XMMRegister reg);
+ template<int I, int T>
+ LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 14fb8ca..e9ce797 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1801,7 +1801,8 @@
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, kSaveFPRegs);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
+ : kDontSaveFPRegs);
CallStub(&ces);
}
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 7abb29b..79960ec 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -924,9 +924,9 @@
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class CompiledFrame;
};
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index c8695c5..7834627 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -3398,9 +3398,17 @@
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode();
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ }
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3661,157 +3669,6 @@
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, failed_allocation, slow;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- __ mov(eax, Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ fld_s(Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ fld_d(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // eax: value
- // For floating-point array type:
- // FP(0): value
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ cmp(eax, 0xc0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &box_int);
- }
-
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(eax);
- __ fild_d(Operand(esp, 0));
- __ pop(eax);
- __ pop(eax);
- }
- // FP(0): value
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ fstp(0);
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -4011,106 +3868,6 @@
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(eax, ecx, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &miss_force_generic);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
- FixedDoubleArray::kHeaderSize));
- } else {
- __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
- }
- __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
- // Set the value.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- // A value was pushed on the floating point stack before the allocation, if
- // the allocation fails it needs to be removed.
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ fstp(0);
- }
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
diff --git a/src/ic.cc b/src/ic.cc
index bf2a649..3633036 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1054,7 +1054,13 @@
ElementsKind elements_kind,
KeyedAccessGrowMode grow_mode) {
ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
- return KeyedLoadElementStub(elements_kind).GetCode();
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind)) {
+ return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
+ } else {
+ ASSERT(elements_kind == DICTIONARY_ELEMENTS);
+ return KeyedLoadDictionaryElementStub().GetCode();
+ }
}
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 1457ee3..ef7dbe0 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -881,7 +881,7 @@
// allocation), so to reduce the lumpiness we don't use the write barriers
// invoked since last step directly to determine the amount of work to do.
intptr_t bytes_to_process =
- marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
+ marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
allocated_ = 0;
write_barriers_invoked_since_last_step_ = 0;
diff --git a/src/isolate.cc b/src/isolate.cc
index 57809ce..d09625c 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -426,11 +426,6 @@
}
-void Isolate::IterateThread(ThreadVisitor* v) {
- v->VisitThread(this, thread_local_top());
-}
-
-
void Isolate::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(this, thread);
@@ -1640,6 +1635,7 @@
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
+ code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
deferred_handles_head_(NULL),
optimizing_compiler_thread_(this) {
@@ -1802,6 +1798,9 @@
delete date_cache_;
date_cache_ = NULL;
+ delete[] code_stub_interface_descriptors_;
+ code_stub_interface_descriptors_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -1965,6 +1964,10 @@
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
+ code_stub_interface_descriptors_ =
+ new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
+ memset(code_stub_interface_descriptors_, 0,
+ kPointerSize * CodeStub::NUMBER_OF_IDS);
// Enable logging before setting up the heap
logger_->SetUp();
@@ -2025,6 +2028,8 @@
debug_->SetUp(create_heap_objects);
#endif
+ deoptimizer_data_ = new DeoptimizerData;
+
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize();
@@ -2043,7 +2048,6 @@
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
- deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->SetUp();
@@ -2065,6 +2069,17 @@
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
+
+ if (!create_heap_objects) {
+ // Now that the heap is consistent, it's OK to generate the code for the
+ // deopt entry table that might have been referred to by optimized code in
+ // the snapshot.
+ HandleScope scope(this);
+ Deoptimizer::EnsureCodeForDeoptimizationEntry(
+ Deoptimizer::LAZY,
+ kDeoptTableSerializeEntryCount - 1);
+ }
+
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
return true;
}
@@ -2179,6 +2194,12 @@
}
+CodeStubInterfaceDescriptor*
+ Isolate::code_stub_interface_descriptor(int index) {
+ return code_stub_interface_descriptors_ + index;
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/src/isolate.h b/src/isolate.h
index 921add3..6bbe5eb 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -53,6 +53,7 @@
class Bootstrapper;
class CodeGenerator;
class CodeRange;
+struct CodeStubInterfaceDescriptor;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
@@ -770,7 +771,6 @@
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
- void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
@@ -923,10 +923,6 @@
bool fp_stubs_generated() { return fp_stubs_generated_; }
- StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
- return &compiler_safe_string_input_buffer_;
- }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -1064,6 +1060,9 @@
date_cache_ = date_cache;
}
+ CodeStubInterfaceDescriptor*
+ code_stub_interface_descriptor(int index);
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1233,7 +1232,6 @@
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
- StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
@@ -1247,6 +1245,7 @@
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+ CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 09014f0..7a8af30 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -597,31 +597,8 @@
void BasicJsonStringifier::ShrinkCurrentPart() {
ASSERT(current_index_ < part_length_);
- if (current_index_ == 0) {
- current_part_ = factory_->empty_string();
- return;
- }
-
- int string_size, allocated_string_size;
- if (is_ascii_) {
- allocated_string_size = SeqOneByteString::SizeFor(part_length_);
- string_size = SeqOneByteString::SizeFor(current_index_);
- } else {
- allocated_string_size = SeqTwoByteString::SizeFor(part_length_);
- string_size = SeqTwoByteString::SizeFor(current_index_);
- }
-
- int delta = allocated_string_size - string_size;
- current_part_->set_length(current_index_);
-
- // String sizes are pointer size aligned, so that we can use filler objects
- // that are a multiple of pointer size.
- Address end_of_string = current_part_->address() + string_size;
- isolate_->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*current_part_))) {
- MemoryChunk::IncrementLiveBytesFromMutator(
- current_part_->address(), -delta);
- }
+ current_part_ = Handle<String>(
+ SeqString::cast(*current_part_)->Truncate(current_index_), isolate_);
}
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 91a9811..b23c867 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -606,7 +606,7 @@
int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kNumAllocatableRegisters;
+ return -index - 1 - Register::kMaxNumAllocatableRegisters;
}
@@ -638,7 +638,7 @@
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kNumAllocatableRegisters);
+ ASSERT(index < Register::kMaxNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
@@ -651,7 +651,7 @@
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
+ ASSERT(index < DoubleRegister::NumAllocatableRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
@@ -768,6 +768,7 @@
void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
+ if (start == -1) return;
for (int i = start; i <= end; ++i) {
if (IsGapAt(i)) {
LInstruction* instr = NULL;
@@ -946,8 +947,8 @@
Define(curr_position, output, NULL);
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersRegisters()) {
+ for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
LiveRange* range = FixedLiveRangeFor(i);
@@ -958,8 +959,8 @@
}
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersDoubleRegisters()) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -989,7 +990,7 @@
for (TempIterator it(instr); !it.Done(); it.Advance()) {
LOperand* temp = it.Current();
- if (instr->IsMarkedAsCall()) {
+ if (instr->ClobbersTemps()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1324,8 +1325,14 @@
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
- PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
+ if (chunk_->info()->IsStub()) {
+ CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
+ PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
+ } else {
+ ASSERT(chunk_->info()->IsOptimizing());
+ PrintF("Function: %s\n",
+ *chunk_->info()->function()->debug_name()->ToCString());
+ }
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1471,14 +1478,14 @@
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
+ num_registers_ = Register::NumAllocatableRegisters();
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+ num_registers_ = DoubleRegister::NumAllocatableRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1757,14 +1764,14 @@
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+ Register::kMaxNumAllocatableRegisters);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1853,10 +1860,10 @@
}
- LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 5b05263..0dd192d 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -608,9 +608,9 @@
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
diff --git a/src/lithium.cc b/src/lithium.cc
index eb2198d..7ad175e 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -414,7 +414,7 @@
}
-Handle<Code> LChunk::Codegen() {
+Handle<Code> LChunk::Codegen(Code::Kind kind) {
MacroAssembler assembler(info()->isolate(), NULL, 0);
LCodeGen generator(this, &assembler, info());
@@ -425,7 +425,7 @@
PrintF("Crankshaft Compiler - ");
}
CodeGenerator::MakeCodePrologue(info());
- Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ Code::Flags flags = Code::ComputeFlags(kind);
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
diff --git a/src/lithium.h b/src/lithium.h
index b4eb2bb..222e893 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -682,7 +682,7 @@
Zone* zone() const { return info_->zone(); }
- Handle<Code> Codegen();
+ Handle<Code> Codegen(Code::Kind kind);
protected:
LChunk(CompilationInfo* info, HGraph* graph)
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 246f685..d8d92cb 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -67,7 +67,7 @@
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_regexp = true;
- FLAG_log_timer_events = true;
+ FLAG_log_internal_timer_events = true;
}
// --prof implies --log-code.
@@ -82,7 +82,7 @@
bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
|| FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
- || FLAG_log_timer_events;
+ || FLAG_log_internal_timer_events;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
diff --git a/src/log.cc b/src/log.cc
index 76af400..9a3c54d 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -707,7 +707,7 @@
void Logger::TimerEvent(const char* name, int64_t start, int64_t end) {
if (!log_->IsEnabled()) return;
- ASSERT(FLAG_log_timer_events);
+ ASSERT(FLAG_log_internal_timer_events);
LogMessageBuilder msg(this);
int since_epoch = static_cast<int>(start - epoch_);
int pause_time = static_cast<int>(end - start);
@@ -1379,8 +1379,7 @@
msg.AppendAddress(sample->pc);
msg.Append(',');
msg.AppendAddress(sample->sp);
- msg.Append(",%ld",
- FLAG_log_timer_events ? static_cast<int>(OS::Ticks() - epoch_) : 0);
+ msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1537,6 +1536,7 @@
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
+ case Code::COMPILED_STUB: // fall through
case Code::STUB:
description =
CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
@@ -1788,7 +1788,7 @@
bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
|| FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
- || FLAG_log_timer_events;
+ || FLAG_log_internal_timer_events;
if (start_logging) {
logging_nesting_ = 1;
@@ -1806,7 +1806,7 @@
}
}
- if (FLAG_log_timer_events) epoch_ = OS::Ticks();
+ if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
return true;
}
diff --git a/src/log.h b/src/log.h
index 12dc923..5fab4fa 100644
--- a/src/log.h
+++ b/src/log.h
@@ -287,11 +287,11 @@
public:
TimerEventScope(Isolate* isolate, const char* name)
: isolate_(isolate), name_(name), start_(0) {
- if (FLAG_log_timer_events) start_ = OS::Ticks();
+ if (FLAG_log_internal_timer_events) start_ = OS::Ticks();
}
~TimerEventScope() {
- if (FLAG_log_timer_events) LogTimerEvent();
+ if (FLAG_log_internal_timer_events) LogTimerEvent();
}
void LogTimerEvent();
diff --git a/src/macros.py b/src/macros.py
index 08fa82e..f871fc5 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -32,6 +32,8 @@
const READ_ONLY = 1;
const DONT_ENUM = 2;
const DONT_DELETE = 4;
+const NEW_ONE_BYTE_STRING = true;
+const NEW_TWO_BYTE_STRING = false;
# Constants used for getter and setter operations.
const GETTER = 0;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a92bcca..8ca14db 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -939,6 +939,10 @@
ASSERT(!function->next_function_link()->IsUndefined());
Object* undefined = isolate_->heap()->undefined_value();
+ // The function is no longer a candidate; make sure it gets visited again
+ // so that previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(function);
+
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
if (candidate == function) {
diff --git a/src/math.js b/src/math.js
index aee56af..4686328 100644
--- a/src/math.js
+++ b/src/math.js
@@ -131,19 +131,16 @@
// All comparisons failed, one of the arguments must be NaN.
return 0/0; // Compiler constant-folds this to NaN.
}
- if (length == 0) {
- return -1/0; // Compiler constant-folds this to -Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
+ var r = -1/0; // Compiler constant-folds this to -Infinity.
+ for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
- if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+ if (NUMBER_IS_NAN(n) || n > r ||
+ (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
+ r = n;
+ }
}
return r;
}
@@ -164,19 +161,16 @@
// All comparisons failed, one of the arguments must be NaN.
return 0/0; // Compiler constant-folds this to NaN.
}
- if (length == 0) {
- return 1/0; // Compiler constant-folds this to Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
+ var r = 1/0; // Compiler constant-folds this to Infinity.
+ for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
- if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+ if (NUMBER_IS_NAN(n) || n < r ||
+ (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
+ r = n;
+ }
}
return r;
}
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0ed2414..13b0b43 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -70,6 +70,8 @@
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_METHODS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/src/object-observe.js b/src/object-observe.js
index c9ae652..8c2895f 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -49,7 +49,7 @@
%ObjectHashTableSet(observationState[this.tableName], key, value);
},
has: function(key) {
- return %ObjectHashTableHas(observationState[this.tableName], key);
+ return !IS_UNDEFINED(this.get(key));
}
};
diff --git a/src/objects-inl.h b/src/objects-inl.h
index b99ba44..9ff35ef 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2512,6 +2512,77 @@
}
+template<class Visitor, class ConsOp>
+void String::Visit(
+ String* string,
+ unsigned offset,
+ Visitor& visitor,
+ ConsOp& consOp,
+ int32_t type,
+ unsigned length) {
+
+ ASSERT(length == static_cast<unsigned>(string->length()));
+ ASSERT(offset <= length);
+
+ unsigned sliceOffset = offset;
+ while (true) {
+ ASSERT(type == string->map()->instance_type());
+
+ switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ case kSeqStringTag | kOneByteStringTag:
+ visitor.VisitOneByteString(
+ reinterpret_cast<const uint8_t*>(
+ SeqOneByteString::cast(string)->GetChars()) + sliceOffset,
+ length - offset);
+ return;
+
+ case kSeqStringTag | kTwoByteStringTag:
+ visitor.VisitTwoByteString(
+ reinterpret_cast<const uint16_t*>(
+ SeqTwoByteString::cast(string)->GetChars()) + sliceOffset,
+ length - offset);
+ return;
+
+ case kExternalStringTag | kOneByteStringTag:
+ visitor.VisitOneByteString(
+ reinterpret_cast<const uint8_t*>(
+ ExternalAsciiString::cast(string)->GetChars()) + sliceOffset,
+ length - offset);
+ return;
+
+ case kExternalStringTag | kTwoByteStringTag:
+ visitor.VisitTwoByteString(
+ reinterpret_cast<const uint16_t*>(
+ ExternalTwoByteString::cast(string)->GetChars()) + sliceOffset,
+ length - offset);
+ return;
+
+ case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kTwoByteStringTag: {
+ SlicedString* slicedString = SlicedString::cast(string);
+ sliceOffset += slicedString->offset();
+ string = slicedString->parent();
+ type = string->map()->instance_type();
+ continue;
+ }
+
+ case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kTwoByteStringTag:
+ string = consOp.Operate(ConsString::cast(string), &offset, &type,
+ &length);
+ if (string == NULL) return;
+ sliceOffset = offset;
+ ASSERT(length == static_cast<unsigned>(string->length()));
+ continue;
+
+ default:
+ UNREACHABLE();
+ return;
+ }
+ }
+}
+
+
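A minimal sketch of a visitor that String::Visit above could dispatch to; the class name and the counting logic are illustrative assumptions, while the two callback signatures match the calls made by the template:

    class SegmentCounter {
     public:
      SegmentCounter() : one_byte_chars_(0), two_byte_chars_(0) { }
      // Invoked with the flat one-byte segment that contains the requested offset.
      void VisitOneByteString(const uint8_t* chars, unsigned length) {
        one_byte_chars_ += length;
      }
      // Invoked with the flat two-byte segment that contains the requested offset.
      void VisitTwoByteString(const uint16_t* chars, unsigned length) {
        two_byte_chars_ += length;
      }
     private:
      unsigned one_byte_chars_;
      unsigned two_byte_chars_;
    };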
uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -2690,6 +2761,146 @@
}
+unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
+ return depth & kDepthMask;
+}
+
+
+uint32_t ConsStringIteratorOp::MaskForDepth(unsigned depth) {
+ return 1 << OffsetForDepth(depth);
+}
+
+
+void ConsStringIteratorOp::SetRightDescent() {
+ trace_ |= MaskForDepth(depth_ - 1);
+}
+
+
+void ConsStringIteratorOp::ClearRightDescent() {
+ trace_ &= ~MaskForDepth(depth_ - 1);
+}
+
+
+void ConsStringIteratorOp::PushLeft(ConsString* string) {
+ frames_[depth_++ & kDepthMask] = string;
+}
+
+
+void ConsStringIteratorOp::PushRight(ConsString* string, int32_t type) {
+ // In-place update.
+ frames_[(depth_-1) & kDepthMask] = string;
+ if (depth_ != 1) return;
+ // Optimization: can replace root in this case.
+ root_ = string;
+ root_type_ = type;
+ root_length_ = string->length();
+}
+
+
+void ConsStringIteratorOp::AdjustMaximumDepth() {
+ if (depth_ > maximum_depth_) maximum_depth_ = depth_;
+}
+
+
+void ConsStringIteratorOp::Pop() {
+ ASSERT(depth_ > 0);
+ ASSERT(depth_ <= maximum_depth_);
+ depth_--;
+}
+
+
+void ConsStringIteratorOp::Reset() {
+ consumed_ = 0;
+ ResetStack();
+}
+
+
+bool ConsStringIteratorOp::HasMore() {
+ return depth_ != 0;
+}
+
+
+void ConsStringIteratorOp::ResetStack() {
+ depth_ = 0;
+ maximum_depth_ = 0;
+}
+
+
+bool ConsStringIteratorOp::ContinueOperation(ContinueResponse* response) {
+ bool blewStack;
+ int32_t type;
+ String* string = NextLeaf(&blewStack, &type);
+ // String found.
+ if (string != NULL) {
+ unsigned length = string->length();
+ consumed_ += length;
+ response->string_ = string;
+ response->offset_ = 0;
+ response->length_ = length;
+ response->type_ = type;
+ return true;
+ }
+ // Traversal complete.
+ if (!blewStack) return false;
+ // Restart search.
+ ResetStack();
+ response->string_ = root_;
+ response->offset_ = consumed_;
+ response->length_ = root_length_;
+ response->type_ = root_type_;
+ return true;
+}
+
+
+uint16_t StringCharacterStream::GetNext() {
+ ASSERT(buffer8_ != NULL);
+ return is_one_byte_ ? *buffer8_++ : *buffer16_++;
+}
+
+
+StringCharacterStream::StringCharacterStream(
+ String* string, unsigned offset, ConsStringIteratorOp* op)
+ : is_one_byte_(true),
+ buffer8_(NULL),
+ end_(NULL),
+ op_(op) {
+ op->Reset();
+ String::Visit(string,
+ offset, *this, *op, string->map()->instance_type(), string->length());
+}
+
+
+bool StringCharacterStream::HasMore() {
+ if (buffer8_ != end_) return true;
+ if (!op_->HasMore()) return false;
+ ConsStringIteratorOp::ContinueResponse response;
+ // HasMore() was checked above, so ContinueOperation cannot fail here.
+ if (!op_->ContinueOperation(&response)) {
+ UNREACHABLE();
+ return false;
+ }
+ String::Visit(response.string_,
+ response.offset_, *this, *op_, response.type_, response.length_);
+ return true;
+}
+
+
+void StringCharacterStream::VisitOneByteString(
+ const uint8_t* chars, unsigned length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ end_ = chars + length;
+}
+
+
+void StringCharacterStream::VisitTwoByteString(
+ const uint16_t* chars, unsigned length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ end_ = reinterpret_cast<const uint8_t*>(chars + length);
+}
+
+
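A hedged usage sketch for StringCharacterStream as declared above, assuming string is a String* already in scope and that a default-constructed ConsStringIteratorOp is acceptable (that constructor is not part of this hunk):

    ConsStringIteratorOp op;
    StringCharacterStream stream(string, 0, &op);  // Start at character offset 0.
    while (stream.HasMore()) {
      uint16_t c = stream.GetNext();  // Next code unit; cons/sliced strings are walked transparently.
      // ... consume c ...
    }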
void JSFunctionResultCache::MakeZeroSize() {
set_finger_index(kEntriesIndex);
set_size(kEntriesIndex);
@@ -3225,6 +3436,7 @@
int Code::major_key() {
ASSERT(kind() == STUB ||
+ kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
@@ -3236,6 +3448,7 @@
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
+ kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
@@ -3344,7 +3557,7 @@
unsigned Code::stack_slots() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
@@ -3352,7 +3565,7 @@
void Code::set_stack_slots(unsigned slots) {
CHECK(slots <= (1 << kStackSlotsBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = StackSlotsField::update(previous, slots);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -3360,7 +3573,7 @@
unsigned Code::safepoint_table_offset() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
@@ -3368,7 +3581,7 @@
void Code::set_safepoint_table_offset(unsigned offset) {
CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = SafepointTableOffsetField::update(previous, offset);
@@ -4741,6 +4954,13 @@
}
+void JSRegExp::ResetLastIndex() {
+ InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0),
+ SKIP_WRITE_BARRIER); // It's a Smi.
+}
+
+
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
#if DEBUG
diff --git a/src/objects.cc b/src/objects.cc
index c5ab315..f5a7063 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1753,6 +1753,9 @@
Isolate* isolate = object->GetIsolate();
HandleScope scope;
Handle<String> type = isolate->factory()->LookupAsciiSymbol(type_str);
+ if (object->IsJSGlobalObject()) {
+ object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
+ }
Handle<Object> args[] = { type, object, name, old_value };
bool threw;
Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
@@ -7013,8 +7016,128 @@
}
-void SafeStringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
+String* ConsStringIteratorOp::Operate(ConsString* consString,
+ unsigned* outerOffset, int32_t* typeOut, unsigned* lengthOut) {
+ ASSERT(*lengthOut == (unsigned)consString->length());
+ // Push the root string.
+ PushLeft(consString);
+ root_ = consString;
+ root_type_ = *typeOut;
+ root_length_ = *lengthOut;
+ unsigned targetOffset = *outerOffset;
+ unsigned offset = 0;
+ while (true) {
+ // Loop until the string containing the target offset is found.
+ String* string = consString->first();
+ unsigned length = string->length();
+ int32_t type;
+ if (targetOffset < offset + length) {
+ // Target offset is in the left branch.
+ // Mark the descent.
+ ClearRightDescent();
+ // Keep going if we're still in a ConsString.
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ consString = ConsString::cast(string);
+ PushLeft(consString);
+ continue;
+ }
+ } else {
+ // Descend right.
+ // Update progress through the string.
+ offset += length;
+ // Keep going if we're still in a ConsString.
+ string = consString->second();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ consString = ConsString::cast(string);
+ PushRight(consString, type);
+ continue;
+ }
+ // Mark the descent.
+ SetRightDescent();
+ // The length needs to be updated for the current string.
+ length = string->length();
+ // Account for the possibility of an empty right leaf.
+ while (length == 0) {
+ bool blewStack;
+ // Need to adjust maximum depth for NextLeaf to work.
+ AdjustMaximumDepth();
+ string = NextLeaf(&blewStack, &type);
+ if (string == NULL) {
+ // Luckily, this case is impossible.
+ ASSERT(!blewStack);
+ return NULL;
+ }
+ length = string->length();
+ }
+ }
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ ASSERT(length != 0);
+ // Adjust return values and exit.
+ unsigned innerOffset = targetOffset - offset;
+ consumed_ += length - innerOffset;
+ *outerOffset = innerOffset;
+ *typeOut = type;
+ *lengthOut = length;
+ return string;
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+String* ConsStringIteratorOp::NextLeaf(bool* blewStack, int32_t* typeOut) {
+ while (true) {
+ // Tree traversal complete.
+ if (depth_ == 0) {
+ *blewStack = false;
+ return NULL;
+ }
+ // We've lost track of higher nodes.
+ if (maximum_depth_ - depth_ == kStackSize) {
+ *blewStack = true;
+ return NULL;
+ }
+ // Check if we're done with this level.
+ bool haveAlreadyReadRight = trace_ & MaskForDepth(depth_ - 1);
+ if (haveAlreadyReadRight) {
+ Pop();
+ continue;
+ }
+ // Go right.
+ ConsString* consString = frames_[OffsetForDepth(depth_ - 1)];
+ String* string = consString->second();
+ int32_t type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ // Don't need to mark the descent here.
+ // Pop the stack so the next iteration is in the correct place.
+ Pop();
+ *typeOut = type;
+ return string;
+ }
+ // No need to mark the descent.
+ consString = ConsString::cast(string);
+ PushRight(consString, type);
+ // Need to traverse all the way left.
+ while (true) {
+ // Continue left.
+ // Update marker.
+ ClearRightDescent();
+ string = consString->first();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ AdjustMaximumDepth();
+ *typeOut = type;
+ return string;
+ }
+ consString = ConsString::cast(string);
+ PushLeft(consString);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
}
@@ -7622,6 +7745,36 @@
}
+String* SeqString::Truncate(int new_length) {
+ Heap* heap = GetHeap();
+ if (new_length <= 0) return heap->empty_string();
+
+ int string_size, allocated_string_size;
+ int old_length = length();
+ if (old_length <= new_length) return this;
+
+ if (IsSeqOneByteString()) {
+ allocated_string_size = SeqOneByteString::SizeFor(old_length);
+ string_size = SeqOneByteString::SizeFor(new_length);
+ } else {
+ allocated_string_size = SeqTwoByteString::SizeFor(old_length);
+ string_size = SeqTwoByteString::SizeFor(new_length);
+ }
+
+ int delta = allocated_string_size - string_size;
+ set_length(new_length);
+
+ // String sizes are pointer size aligned, so that we can use filler objects
+ // that are a multiple of pointer size.
+ Address end_of_string = address() + string_size;
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(address(), -delta);
+ }
+ return this;
+}
+
+
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@@ -8978,6 +9131,12 @@
break;
}
+ case Translation::COMPILED_STUB_FRAME: {
+ Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
+ PrintF(out, "{kind=%d}", stub_kind);
+ break;
+ }
+
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME: {
int function_id = iterator.Next();
@@ -9092,6 +9251,7 @@
switch (kind) {
case FUNCTION: return "FUNCTION";
case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
+ case COMPILED_STUB: return "COMPILED_STUB";
case STUB: return "STUB";
case BUILTIN: return "BUILTIN";
case LOAD_IC: return "LOAD_IC";
@@ -9211,7 +9371,7 @@
}
PrintF("\n");
- if (kind() == OPTIMIZED_FUNCTION) {
+ if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
SafepointTable table(this);
PrintF(out, "Safepoints (size = %u)\n", table.size());
for (unsigned i = 0; i < table.length(); i++) {
diff --git a/src/objects.h b/src/objects.h
index 9737e7f..e3a0c7f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -4233,6 +4233,7 @@
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
V(STUB) \
+ V(COMPILED_STUB) \
V(BUILTIN) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
@@ -4848,6 +4849,10 @@
return IsFastDoubleElementsKind(elements_kind());
}
+ inline bool has_fast_elements() {
+ return IsFastElementsKind(elements_kind());
+ }
+
inline bool has_non_strict_arguments_elements() {
return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
}
@@ -6646,6 +6651,7 @@
inline Object* DataAtUnchecked(int index);
inline void SetDataAtUnchecked(int index, Object* value, Heap* heap);
inline Type TypeTagUnchecked();
+ inline void ResetLastIndex();
static int code_index(bool is_ascii) {
if (is_ascii) {
@@ -7167,6 +7173,8 @@
// All string values have a length field.
class String: public HeapObject {
public:
+ enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
+
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -7505,6 +7513,14 @@
return NonAsciiStart(chars, length) >= length;
}
+ template<class Visitor, class ConsOp>
+ static inline void Visit(String* string,
+ unsigned offset,
+ Visitor& visitor,
+ ConsOp& consOp,
+ int32_t type,
+ unsigned length);
+
protected:
class ReadBlockBuffer {
public:
@@ -7563,6 +7579,11 @@
// Layout description.
static const int kHeaderSize = String::kSize;
+ // Truncate the string in-place if possible and return the result.
+ // In case of new_length <= 0, the empty string is returned without
+ // truncating the original string.
+ MUST_USE_RESULT String* Truncate(int new_length);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
@@ -7954,14 +7975,75 @@
};
-class SafeStringInputBuffer
- : public unibrow::InputBuffer<String, String**, 256> {
+// This maintains an off-stack representation of the stack frames required
+// to traverse a ConsString, allowing an entirely iterative and restartable
+// traversal of the entire string.
+// Note: this class is not GC-safe.
+class ConsStringIteratorOp {
public:
- virtual void Seek(unsigned pos);
- inline SafeStringInputBuffer()
- : unibrow::InputBuffer<String, String**, 256>() {}
- explicit inline SafeStringInputBuffer(String** backing)
- : unibrow::InputBuffer<String, String**, 256>(backing) {}
+ struct ContinueResponse {
+ String* string_;
+ unsigned offset_;
+ unsigned length_;
+ int32_t type_;
+ };
+ inline ConsStringIteratorOp() {}
+ String* Operate(ConsString* consString, unsigned* outerOffset,
+ int32_t* typeOut, unsigned* lengthOut);
+ inline bool ContinueOperation(ContinueResponse* response);
+ inline void Reset();
+ inline bool HasMore();
+
+ private:
+ // TODO(dcarney): Templatize this out for different stack sizes.
+ static const unsigned kStackSize = 32;
+ // Use a mask instead of doing modulo operations for stack wrapping.
+ static const unsigned kDepthMask = kStackSize - 1;
+ STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
+ static inline unsigned OffsetForDepth(unsigned depth);
+ static inline uint32_t MaskForDepth(unsigned depth);
+
+ inline void ClearRightDescent();
+ inline void SetRightDescent();
+ inline void PushLeft(ConsString* string);
+ inline void PushRight(ConsString* string, int32_t type);
+ inline void AdjustMaximumDepth();
+ inline void Pop();
+ inline void ResetStack();
+ String* NextLeaf(bool* blewStack, int32_t* typeOut);
+
+ unsigned depth_;
+ unsigned maximum_depth_;
+ uint32_t trace_;
+ ConsString* frames_[kStackSize];
+ unsigned consumed_;
+ ConsString* root_;
+ int32_t root_type_;
+ unsigned root_length_;
+ DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
+};
+
+
+// Note: this class is not GC-safe.
+class StringCharacterStream {
+ public:
+ inline StringCharacterStream(
+ String* string, unsigned offset, ConsStringIteratorOp* op);
+ inline uint16_t GetNext();
+ inline bool HasMore();
+ inline void Reset(String* string, unsigned offset, ConsStringIteratorOp* op);
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length);
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
+
+ private:
+ bool is_one_byte_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+ const uint8_t* end_;
+ ConsStringIteratorOp* op_;
+ DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 2d56d1a..7aad78c 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class OptimizingCompiler;
class SharedFunctionInfo;
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 602fbb4..c0fdf48 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -42,6 +42,7 @@
output_ = NULL;
size_ = 0;
pos_ = 0;
+ InitializeAstVisitor();
}
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 9ac7257..41175ab 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -74,6 +74,8 @@
void PrintDeclarations(ZoneList<Declaration*>* declarations);
void PrintFunctionLiteral(FunctionLiteral* function);
void PrintCaseClause(CaseClause* clause);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 85b6544..6e49c7b 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1644,12 +1644,14 @@
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress)
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver)
: heap_(Isolate::Current()->heap()),
snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
- filler_(NULL) {
+ filler_(NULL),
+ global_object_name_resolver_(resolver) {
}
@@ -2712,21 +2714,30 @@
isolate->factory()->NewStringFromAscii(CStrVector("URL"));
const char** urls = NewArray<const char*>(enumerator.count());
for (int i = 0, l = enumerator.count(); i < l; ++i) {
- urls[i] = NULL;
- HandleScope scope;
- Handle<JSGlobalObject> global_obj = enumerator.at(i);
- Object* obj_document;
- if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
- obj_document->IsJSObject()) {
- // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
- // As result GetProperty(*url_string) will crash.
- if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
- continue;
- JSObject* document = JSObject::cast(obj_document);
- Object* obj_url;
- if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
- obj_url->IsString()) {
- urls[i] = collection_->names()->GetName(String::cast(obj_url));
+ if (global_object_name_resolver_) {
+ HandleScope scope;
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ urls[i] = global_object_name_resolver_->GetName(
+ Utils::ToLocal(Handle<JSObject>::cast(global_obj)));
+ } else {
+ // TODO(yurys): This branch is going to be removed once Chromium migrates
+ // to the new name resolver.
+ urls[i] = NULL;
+ HandleScope scope;
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ Object* obj_document;
+ if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
+ obj_document->IsJSObject()) {
+ // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
+ // As a result, GetProperty(*url_string) will crash.
+ if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
+ continue;
+ JSObject* document = JSObject::cast(obj_document);
+ Object* obj_url;
+ if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
+ obj_url->IsString()) {
+ urls[i] = collection_->names()->GetName(String::cast(obj_url));
+ }
}
}
}
@@ -3081,11 +3092,13 @@
};
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control)
+HeapSnapshotGenerator::HeapSnapshotGenerator(
+ HeapSnapshot* snapshot,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver)
: snapshot_(snapshot),
control_(control),
- v8_heap_explorer_(snapshot_, this),
+ v8_heap_explorer_(snapshot_, this, resolver),
dom_explorer_(snapshot_, this) {
}
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 04f4a1c..f306659 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -851,7 +851,8 @@
class V8HeapExplorer : public HeapEntriesAllocator {
public:
V8HeapExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
void AddRootEntries(SnapshotFillerInterface* filler);
@@ -945,6 +946,7 @@
SnapshotFillerInterface* filler_;
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
+ v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
static HeapObject* const kGcRootsObject;
static HeapObject* const kFirstGcSubrootObject;
@@ -1021,7 +1023,8 @@
class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
public:
HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control);
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
bool GenerateSnapshot();
private:
diff --git a/src/regexp.js b/src/regexp.js
index a3675f0..da1883f 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -161,6 +161,7 @@
lastMatchInfoOverride = null;
return BuildResultFromMatchInfo(matchInfo, string);
}
+ regexp.lastIndex = 0;
return null;
}
@@ -193,7 +194,7 @@
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
- if (global) this.lastIndex = 0;
+ this.lastIndex = 0;
return null;
}
@@ -256,7 +257,10 @@
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) return false;
+ if (matchIndices === null) {
+ this.lastIndex = 0;
+ return false;
+ }
lastMatchInfoOverride = null;
return true;
}
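An illustrative sketch of the behavior the regexp.js hunks above implement (my reading of ES5.1 section 15.10.6.2; the snippet itself is not part of the patch): a failed exec() or test() now resets lastIndex to 0 regardless of the global flag.

  var re = /a/;       // non-global; lastIndex does not affect where matching starts
  re.lastIndex = 3;
  re.exec("bbb");     // no match
  // re.lastIndex is expected to read 0 now (previously it stayed 3)

  var reg = /a/g;
  reg.lastIndex = 3;
  reg.test("bbb");    // no match
  // reg.lastIndex is expected to read 0, as before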
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 2a98787..02907de 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -43,7 +43,9 @@
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(isolate(), zone) { }
+ factory_(Isolate::Current(), zone) {
+ InitializeAstVisitor();
+ }
virtual ~Processor() { }
@@ -86,6 +88,8 @@
#undef DEF_VISIT
void VisitIterationStatement(IterationStatement* stmt);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
diff --git a/src/runtime.cc b/src/runtime.cc
index 5cf2d44..9f5e78d 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1790,9 +1790,7 @@
JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
+ regexp->ResetLastIndex();
return regexp;
}
@@ -2904,7 +2902,10 @@
isolate, *subject, pattern, &indices, 0xffffffff, zone);
int matches = indices.length();
- if (matches == 0) return *subject;
+ if (matches == 0) {
+ pattern_regexp->ResetLastIndex();
+ return *subject;
+ }
// Detect integer overflow.
int64_t result_len_64 =
@@ -3004,6 +3005,7 @@
int32_t* current_match = global_cache.FetchNext();
if (current_match == NULL) {
if (global_cache.HasException()) return Failure::Exception();
+ regexp->ResetLastIndex();
return *subject;
}
@@ -3102,6 +3104,7 @@
int32_t* current_match = global_cache.FetchNext();
if (current_match == NULL) {
if (global_cache.HasException()) return Failure::Exception();
+ regexp->ResetLastIndex();
return *subject;
}
@@ -5098,46 +5101,22 @@
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSArray, codes, 0);
- int length = Smi::cast(codes->length())->value();
-
- // Check if the string can be ASCII.
- int i;
- for (i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- // We probably can't get an exception here, but just in order to enforce
- // the checking of inputs in the runtime calls we check here.
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- if ((chr & 0xffff) > String::kMaxAsciiCharCode)
- break;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
+ CONVERT_SMI_ARG_CHECKED(length, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
+ if (length == 0) return isolate->heap()->empty_string();
+ if (is_one_byte) {
+ return isolate->heap()->AllocateRawOneByteString(length);
+ } else {
+ return isolate->heap()->AllocateRawTwoByteString(length);
}
+}
- MaybeObject* maybe_object = NULL;
- if (i == length) { // The string is ASCII.
- maybe_object = isolate->heap()->AllocateRawOneByteString(length);
- } else { // The string is not ASCII.
- maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
- }
- Object* object = NULL;
- if (!maybe_object->ToObject(&object)) return maybe_object;
- String* result = String::cast(object);
- for (int i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- result->Set(i, chr & 0xffff);
- }
- return result;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) {
+ CONVERT_ARG_CHECKED(SeqString, string, 0);
+ CONVERT_SMI_ARG_CHECKED(new_length, 1);
+ return string->Truncate(new_length);
}
@@ -7937,6 +7916,17 @@
};
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyICMiss) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
+ delete deoptimizer;
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -7945,9 +7935,11 @@
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
- JavaScriptFrameIterator it(isolate);
+
+ ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
// Make sure to materialize objects before causing any allocation.
+ JavaScriptFrameIterator it(isolate);
deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;
@@ -13402,6 +13394,12 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return isolate->heap()->false_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = JSReceiver::cast(proto);
+ }
return isolate->heap()->ToBoolean(obj->map()->is_observed());
}
@@ -13410,6 +13408,12 @@
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return isolate->heap()->undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = JSReceiver::cast(proto);
+ }
if (obj->map()->is_observed() != is_observed) {
MaybeObject* maybe = obj->map()->Copy();
Map* map;
@@ -13445,6 +13449,10 @@
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
Object* key = args[1];
+ if (key->IsJSGlobalProxy()) {
+ key = key->GetPrototype();
+ if (key->IsNull()) return isolate->heap()->undefined_value();
+ }
Object* lookup = table->Lookup(key);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : lookup;
}
@@ -13455,21 +13463,15 @@
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(ObjectHashTable, table, 0);
Handle<Object> key = args.at<Object>(1);
+ if (key->IsJSGlobalProxy()) {
+ key = handle(key->GetPrototype(), isolate);
+ if (key->IsNull()) return *table;
+ }
Handle<Object> value = args.at<Object>(2);
return *PutIntoObjectHashTable(table, key, value);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableHas) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
- Object* key = args[1];
- Object* lookup = table->Lookup(key);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
-}
-
-
// ----------------------------------------------------------------------------
// Implementation of Runtime
diff --git a/src/runtime.h b/src/runtime.h
index 9d53c35..7a21bb9 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -89,6 +89,7 @@
F(ForceParallelRecompile, 1, 1) \
F(InstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
+ F(NotifyICMiss, 0, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
@@ -113,7 +114,6 @@
F(Typeof, 1, 1) \
\
F(StringToNumber, 1, 1) \
- F(StringFromCharCodeArray, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
F(StringToLowerCase, 1, 1) \
@@ -122,10 +122,6 @@
F(CharFromCode, 1, 1) \
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- F(QuoteJSONStringComma, 1, 1) \
- F(QuoteJSONStringArray, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
@@ -194,6 +190,10 @@
\
/* JSON */ \
F(ParseJson, 1, 1) \
+ F(BasicJSONStringify, 1, 1) \
+ F(QuoteJSONString, 1, 1) \
+ F(QuoteJSONStringComma, 1, 1) \
+ F(QuoteJSONStringArray, 1, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
@@ -207,6 +207,8 @@
F(StringTrim, 3, 1) \
F(StringToArray, 2, 1) \
F(NewStringWrapper, 1, 1) \
+ F(NewString, 2, 1) \
+ F(TruncateString, 2, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
@@ -331,7 +333,6 @@
F(CreateObjectHashTable, 0, 1) \
F(ObjectHashTableGet, 2, 1) \
F(ObjectHashTableSet, 3, 1) \
- F(ObjectHashTableHas, 2, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
@@ -530,6 +531,8 @@
F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index 714e5c3..9e42304 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -59,7 +59,8 @@
SafepointTable::SafepointTable(Code* code) {
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::COMPILED_STUB);
code_ = code;
Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
@@ -158,14 +159,6 @@
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
- while (assembler->pc_offset() < target_offset) {
- assembler->nop();
- }
-
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(kIntSize);
assembler->RecordComment(";;; Safepoint table.");
diff --git a/src/serialize.cc b/src/serialize.cc
index dfc5574..26e0f01 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -30,6 +30,7 @@
#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
+#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
@@ -527,6 +528,17 @@
UNCLASSIFIED,
51,
"Code::MakeCodeYoung");
+
+ // Add a small set of deopt entry addresses to encoder without generating the
+ // deopt table code, which isn't possible at deserialization time.
+ HandleScope scope(Isolate::Current());
+ for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+ Address address = Deoptimizer::GetDeoptimizationEntry(
+ entry,
+ Deoptimizer::LAZY,
+ Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+ Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
+ }
}
diff --git a/src/serialize.h b/src/serialize.h
index 2041792..4bbde5a 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -47,10 +47,11 @@
EXTENSION,
ACCESSOR,
RUNTIME_ENTRY,
- STUB_CACHE_TABLE
+ STUB_CACHE_TABLE,
+ LAZY_DEOPTIMIZATION
};
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
const int kFirstTypeCode = UNCLASSIFIED;
const int kReferenceIdBits = 16;
@@ -59,6 +60,7 @@
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
+const int kDeoptTableSerializeEntryCount = 8;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
diff --git a/src/smart-pointers.h b/src/smart-pointers.h
index 345c4d4..02025bb 100644
--- a/src/smart-pointers.h
+++ b/src/smart-pointers.h
@@ -58,11 +58,16 @@
// You can get the underlying pointer out with the * operator.
inline T* operator*() { return p_; }
- // You can use [n] to index as if it was a plain pointer
+ // You can use [n] to index as if it was a plain pointer.
inline T& operator[](size_t i) {
return p_[i];
}
+ // You can use [n] to index as if it was a plain pointer.
+ inline const T& operator[](size_t i) const {
+ return p_[i];
+ }
+
// We don't have implicit conversion to a T* since that hinders migration:
// You would not be able to change a method from returning a T* to
// returning an SmartArrayPointer<T> and then get errors wherever it is used.
@@ -77,6 +82,11 @@
return temp;
}
+ inline void Reset(T* new_value) {
+ if (p_) Deallocator::Delete(p_);
+ p_ = new_value;
+ }
+
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
diff --git a/src/spaces.cc b/src/spaces.cc
index cacd969..ec9f30d 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1680,6 +1680,7 @@
CASE(FUNCTION);
CASE(OPTIMIZED_FUNCTION);
CASE(STUB);
+ CASE(COMPILED_STUB);
CASE(BUILTIN);
CASE(LOAD_IC);
CASE(KEYED_LOAD_IC);
diff --git a/src/string.js b/src/string.js
index 6115930..badfad3 100644
--- a/src/string.js
+++ b/src/string.js
@@ -186,6 +186,9 @@
}
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
+ // Emulate RegExp.prototype.exec's side effect in step 5, even though
+ // value is discarded.
+ ToInteger(regexp.lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
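A small sketch of the side effect the new comment refers to (step 5 of RegExp.prototype.exec converts lastIndex with ToInteger even though the value is then discarded for a non-global regexp); the counter and valueOf hook below are invented for illustration, and the replace hunk further down gets the same treatment:

  var calls = 0;
  var re = /x/;  // non-global
  re.lastIndex = { valueOf: function() { calls++; return 0; } };
  "abc".match(re);
  // calls is expected to be 1: the conversion ran purely for its side effect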
@@ -227,6 +230,9 @@
// Delegate to one of the regular expression variants if necessary.
if (IS_REGEXP(search)) {
+ // Emulate RegExp.prototype.exec's side effect in step 5, even though
+ // value is discarded.
+ ToInteger(search.lastIndex);
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
if (IS_SPEC_FUNCTION(replace)) {
if (search.global) {
@@ -451,7 +457,10 @@
function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) return subject;
+ if (IS_NULL(matchInfo)) {
+ regexp.lastIndex = 0;
+ return subject;
+ }
var index = matchInfo[CAPTURE0];
var result = SubString(subject, 0, index);
var endOfMatch = matchInfo[CAPTURE1];
@@ -801,6 +810,7 @@
var static_charcode_array = new InternalArray(4);
+
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
@@ -809,17 +819,24 @@
return %_StringCharFromCode(code & 0xffff);
}
- // NOTE: This is not super-efficient, but it is necessary because we
- // want to avoid converting to numbers from within the virtual
- // machine. Maybe we can find another way of doing this?
- var codes = static_charcode_array;
- for (var i = 0; i < n; i++) {
+ var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
+ var i;
+ for (i = 0; i < n; i++) {
var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code);
- codes[i] = code;
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ if (code > 0x7f) break;
+ %_OneByteSeqStringSetChar(one_byte, i, code);
}
- codes.length = n;
- return %StringFromCharCodeArray(codes);
+ if (i == n) return one_byte;
+ one_byte = %TruncateString(one_byte, i);
+
+ var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
+ for (var j = 0; i < n; i++, j++) {
+ var code = %_Arguments(i);
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ %_TwoByteSeqStringSetChar(two_byte, j, code);
+ }
+ return one_byte + two_byte;
}
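The rewritten StringFromCharCode above writes characters straight into a sequential one-byte string until it meets a code above 0x7f, truncates what it has, and finishes the remaining arguments in a two-byte string, returning the concatenation of the two parts. A short usage sketch; the results are ordinary spec behavior, not taken from the patch:

  String.fromCharCode(72, 105);          // "Hi" - stays entirely on the one-byte path
  String.fromCharCode(72, 0x20AC, 105);  // "H\u20ACi" - switches to the two-byte path at U+20AC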
diff --git a/src/stub-cache.h b/src/stub-cache.h
index f858e47..c562bc7 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -681,13 +681,6 @@
Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
CodeHandleList* handler_ics);
- static void GenerateLoadExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
-
- static void GenerateLoadFastElement(MacroAssembler* masm);
-
- static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
-
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
diff --git a/src/uri.js b/src/uri.js
index b195f3d..1de22f8 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -165,11 +165,11 @@
throw new $URIError("URI malformed");
}
if (value < 0x10000) {
- result[index++] = value;
+ %_TwoByteSeqStringSetChar(result, index++, value);
return index;
} else {
- result[index++] = (value >> 10) + 0xd7c0;
- result[index++] = (value & 0x3ff) + 0xdc00;
+ %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
+ %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
return index;
}
}
@@ -178,43 +178,72 @@
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
var uriLength = uri.length;
- // We are going to pass result to %StringFromCharCodeArray
- // which does not expect any getters/setters installed
- // on the incoming array.
- var result = new InternalArray(uriLength);
+ var array = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var cc1 = uri.charCodeAt(k);
if (unescape(cc1)) {
- result[index++] = cc1;
+ array[index++] = cc1;
} else {
if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, result, index);
+ index = URIEncodeSingle(cc1, array, index);
} else {
k++;
if (k == uriLength) throw new $URIError("URI malformed");
var cc2 = uri.charCodeAt(k);
if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, result, index);
+ index = URIEncodePair(cc1, cc2, array, index);
}
}
}
- return %StringFromCharCodeArray(result);
+
+ var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
+ for (var i = 0; i < array.length; i++) {
+ %_OneByteSeqStringSetChar(result, i, array[i]);
+ }
+ return result;
}
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
var uriLength = uri.length;
- // We are going to pass result to %StringFromCharCodeArray
- // which does not expect any getters/setters installed
- // on the incoming array.
- var result = new InternalArray(uriLength);
+ var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var ch = uri.charAt(k);
- if (ch == '%') {
+ var k = 0;
+
+ // Optimistically assume ASCII string.
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
+ if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+ var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
+ if (cc >> 7) break; // Assumption wrong, two byte string.
+ if (reserved(cc)) {
+ %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+ } else {
+ %_OneByteSeqStringSetChar(one_byte, index++, cc);
+ }
+ k += 2;
+ } else {
+ if (code > 0x7f) break; // Assumption wrong, two byte string.
+ %_OneByteSeqStringSetChar(one_byte, index++, code);
+ }
+ }
+
+ one_byte = %TruncateString(one_byte, index);
+ if (k == uriLength) return one_byte;
+
+ // Write into two byte string.
+ var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
+ index = 0;
+
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
if (k + 2 >= uriLength) throw new $URIError("URI malformed");
var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
if (cc >> 7) {
@@ -229,22 +258,21 @@
octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
uri.charCodeAt(++k));
}
- index = URIDecodeOctets(octets, result, index);
+ index = URIDecodeOctets(octets, two_byte, index);
+ } else if (reserved(cc)) {
+ %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
} else {
- if (reserved(cc)) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = uri.charCodeAt(k - 1);
- result[index++] = uri.charCodeAt(k);
- } else {
- result[index++] = cc;
- }
+ %_TwoByteSeqStringSetChar(two_byte, index++, cc);
}
} else {
- result[index++] = ch.charCodeAt(0);
+ %_TwoByteSeqStringSetChar(two_byte, index++, code);
}
}
- result.length = index;
- return %StringFromCharCodeArray(result);
+
+ two_byte = %TruncateString(two_byte, index);
+ return one_byte + two_byte;
}
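Decode above now fills sequential strings directly: an optimistic one-byte pass runs until an escape decodes to a code above 0x7f, the partial result is truncated, and the remainder is finished in a two-byte pass, with the two parts concatenated; Encode assembles its result in a one-byte sequential string the same way. The expected results below are standard URI semantics, not taken from the patch:

  decodeURIComponent("a%2Fb");             // "a/b" - one-byte pass only
  decodeURIComponent("price %E2%82%AC5");  // "price \u20AC5" - switches to the two-byte pass
  decodeURIComponent("%F0%9F%98%80");      // "\uD83D\uDE00" - the surrogate pair computed
                                           // with the +0xd7c0 / +0xdc00 arithmetic above
  encodeURIComponent("\u20AC");            // "%E2%82%AC"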
diff --git a/src/utils.h b/src/utils.h
index e03f96f..b4cd5a8 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1015,6 +1015,7 @@
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+ static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@@ -1030,9 +1031,12 @@
// code (function declarations).
static const int kDeclarationsId = 3;
- // Ever FunctionState starts with this id.
+ // Every FunctionState starts with this id.
static const int kFirstUsableId = 4;
+ // Every compiled stub starts with this id.
+ static const int kStubEntryId = 5;
+
int id_;
};
diff --git a/src/version.cc b/src/version.cc
index 5b25237..e2997ce 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 15
-#define BUILD_NUMBER 8
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 370cb02..cc07287 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -201,7 +201,8 @@
// -----------------------------------------------------------------------------
// Register constants.
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+const int
+ Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 24c8df3..9471a6d 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -95,21 +95,24 @@
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
+ static const int kMaxNumAllocatableRegisters = 10;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"rax",
"rbx",
@@ -157,7 +160,7 @@
int code_;
private:
- static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
@@ -200,7 +203,10 @@
struct XMMRegister {
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+ static const int kMaxNumAllocatableRegisters = 15;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
@@ -208,13 +214,13 @@
}
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kNumAllocatableRegisters);
+ ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
XMMRegister result = { index + 1 };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index ed0ec68..e156dfd 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -646,6 +646,25 @@
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ Pushad();
+ __ CallRuntime(Runtime::kNotifyICMiss, 0);
+ __ Popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
@@ -660,17 +679,17 @@
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 9705718..f950368 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -36,6 +36,18 @@
namespace v8 {
namespace internal {
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ isolate->builtins()->KeyedLoadIC_Miss();
+}
+
+
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index ab8ea76..71cef58 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -37,7 +37,7 @@
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@@ -60,7 +60,7 @@
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -79,7 +79,7 @@
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -216,7 +216,7 @@
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -238,7 +238,7 @@
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -250,7 +250,7 @@
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() {}
@@ -287,7 +287,7 @@
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -316,7 +316,7 @@
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -378,7 +378,7 @@
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -561,7 +561,7 @@
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(rcx)) continue;
if (candidate.is(r1)) continue;
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 67d1d90..7954604 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -606,6 +606,46 @@
}
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ Check(masm->CheckSmi(index), "Non-smi index");
+ __ Check(masm->CheckSmi(value), "Non-smi value");
+
+ __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ SmiCompare(index, Smi::FromInt(0));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
+}
+
+
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
XMMRegister input,
XMMRegister result,
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index d444095..3a7646b 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -44,6 +44,10 @@
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -63,6 +67,8 @@
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index a3fe8f9..20f507b 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -211,7 +211,7 @@
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
@@ -248,7 +248,7 @@
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -340,7 +340,7 @@
unsigned pc_offset = data->OsrPcOffset()->value();
intptr_t pc = reinterpret_cast<intptr_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
@@ -459,6 +459,70 @@
}
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO <-rbp
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+<-rsp
+ // | | saved frame (rbp) |
+ // | +=========================+<-rbp
+ // | | JSFunction context |
+ // v +-------------------------+
+ // | COMPILED_STUB marker | rbp = saved frame
+ // +-------------------------+ rsi = JSFunction context
+ // | |
+ // | ... |
+ // | |
+ // +-------------------------+<-rsp
+ //
+ //
+ int output_frame_size = 1 * kPointerSize;
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, 0);
+ Code* notify_miss =
+ isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+ Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+ unsigned input_frame_size = input_->GetFrameSize();
+ intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(0, value);
+ value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+ output_frame->SetRegister(rbp.code(), value);
+ output_frame->SetFp(value);
+ value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+ output_frame->SetRegister(rsi.code(), value);
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ ASSERT(opcode == Translation::REGISTER);
+ USE(opcode);
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(rdx.code(), input_value);
+
+ int32_t next = iterator->Next();
+ opcode = static_cast<Translation::Opcode>(next);
+ ASSERT(opcode == Translation::REGISTER);
+ input_reg = iterator->Next();
+ input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(rax.code(), input_value);
+
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+}
+
+
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@@ -878,7 +942,7 @@
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -898,10 +962,10 @@
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::NumAllocatableRegisters();
__ subq(rsp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(rsp, offset), xmm_reg);
@@ -990,7 +1054,7 @@
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ pop(Operand(rbx, dst_offset));
}
@@ -1011,10 +1075,13 @@
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
@@ -1031,28 +1098,33 @@
__ pop(rax);
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(rbx, src_offset));
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 650337a..c87155f 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -3047,6 +3047,38 @@
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(rcx);
+ __ pop(rbx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(rcx);
+ __ pop(rbx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 9217a94..0e5c99b 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -119,35 +119,45 @@
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
+ // Strict mode functions need to replace the receiver with undefined
+ // when called as functions (without an explicit receiver
+ // object). rcx is zero for method calls and non-zero for function
+ // calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
}
info()->set_prologue_offset(masm_->pc_offset());
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS function.
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ if (info()->IsStub()) {
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+ __ push(rdi); // Callee's JS function.
+ }
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
@@ -177,7 +187,7 @@
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@@ -213,7 +223,7 @@
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -266,9 +276,55 @@
bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
- __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
+ Address entry = jump_table_[i].address;
+ if (jump_table_[i].needs_frame) {
+ __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+ if (jump_table_[i].is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ jmp(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ call(kScratchRegister);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ jmp(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(r8);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ jmp(kScratchRegister);
+ }
+ }
+ } else {
+ if (jump_table_[i].is_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
}
return !is_aborted();
}
@@ -280,10 +336,32 @@
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that rsi isn't trashed.
+ __ push(rbp); // Caller's frame pointer.
+ __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
__ jmp(code->exit());
}
}
@@ -396,7 +474,9 @@
translation,
arguments_index,
arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -420,6 +500,9 @@
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
}
// Inlined frames which push their arguments cause the index to be
@@ -610,20 +693,33 @@
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool lazy_deopt = info()->IsStub();
if (cc == no_condition) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ if (lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
// We often have several deopts to the same entry; reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry) {
- jump_table_.Add(JumpTableEntry(entry), zone());
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().is_lazy_deopt != lazy_deopt) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
+ jump_table_.Add(table_entry, zone());
}
__ j(cc, &jump_table_.last().label);
}
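The reuse condition above now has to match on all three fields, not just the target address. A compressed restatement of that predicate, as a hypothetical helper rather than patch code:

// Illustrative: when DeoptimizeIf must add a new jump-table entry.
static bool NeedsNewJumpTableEntry(const ZoneList<JumpTableEntry>& table,
                                   Address entry,
                                   bool needs_frame,
                                   bool is_lazy_deopt) {
  return table.is_empty() ||
         table.last().address != entry ||
         table.last().needs_frame != needs_frame ||   // needs_frame == !frame_is_built_
         table.last().is_lazy_deopt != is_lazy_deopt;
}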
@@ -1437,6 +1533,15 @@
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
@@ -2288,15 +2393,22 @@
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime
// call to return the value in the same register.
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ if (NeedsEagerFrame()) {
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
+ if (info()->IsStub()) {
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Ret(0, r10);
+ } else {
+ __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ }
}
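A small worked example of the non-stub return path above (the numbers are illustrative, not from the patch): for a function with two declared parameters on x64, GetParameterCount() is 2, so the return pops the receiver plus both arguments.

// Illustrative arithmetic for the optimized-function epilogue (kPointerSize == 8):
//   (GetParameterCount() + 1) * kPointerSize == (2 + 1) * 8 == 24 bytes popped,
//   i.e. the receiver plus two arguments, with rcx as Ret's scratch register.
// Stubs instead reload rsi from the caller's context slot and return with
// Ret(0, r10), since their parameters arrive in registers rather than on the
// stack.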
@@ -4527,10 +4639,10 @@
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
- LEnvironment* env) {
+ LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
@@ -4548,7 +4660,7 @@
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}
@@ -4615,7 +4727,7 @@
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
@@ -4624,7 +4736,7 @@
// Check the holder map.
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
}
@@ -5160,6 +5272,7 @@
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index e068f14..2fa10e1 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -63,6 +63,7 @@
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -77,6 +78,15 @@
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
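A compressed restatement of the two predicates above, sketched under the assumption that only compiled stubs can avoid an eager frame (hypothetical helper, not part of the header):

// Hypothetical helper mirroring NeedsEagerFrame()/NeedsDeferredFrame():
// optimized functions always get a frame; a compiled stub gets one eagerly
// only if it spills or calls on its main path, and otherwise builds it
// lazily around deferred code (when is_deferred_calling() is set).
static bool StubBuildsFrameLazily(int spill_slots,
                                  bool non_deferred_calling,
                                  bool deferred_calling) {
  bool eager = spill_slots > 0 || non_deferred_calling;
  return !eager && deferred_calling;
}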
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
@@ -110,7 +120,7 @@
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ CompareMapMode mode, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -158,7 +168,7 @@
Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -327,11 +337,15 @@
int* offset);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt(int space_needed);
@@ -360,6 +374,7 @@
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -374,6 +389,7 @@
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index e102803..c9c99de 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -44,10 +44,10 @@
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -619,6 +619,8 @@
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -1581,6 +1583,17 @@
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(rcx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), rcx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
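The fixed rcx operand above is presumably there so the generated store can use a byte form of the register. A rough sketch of the expected store shapes, as an assumption about SeqStringSetCharGenerator rather than something shown in this patch:

// Assumed store shapes, depending on the string encoding:
//   one-byte strings:  movb [string + index * 1 + <header offset>], <value byte>
//   two-byte strings:  movw [string + index * 2 + <header offset>], <value word>
// Fixing value in rcx (asserted to be a byte register) keeps the one-byte
// case encodable without extra register shuffling.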
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = Use(instr->length());
@@ -1617,8 +1630,12 @@
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ // Only mark conversions that might need to allocate as calling rather than
+ // all changes, so that simple, non-allocating conversions do not force a
+ // stack frame to be built.
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1636,6 +1653,7 @@
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
@@ -1649,6 +1667,7 @@
return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -2115,8 +2134,17 @@
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
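For the stub branch above, parameters are pinned to the registers listed in the stub's interface descriptor instead of being spilled. A hypothetical descriptor makes the effect concrete (the register choices are illustrative, not taken from the patch):

// Hypothetical keyed-load-style descriptor with register_params_ = { rdx, rax }:
//   HParameter 0 (receiver) -> DefineFixed(result, rdx)
//   HParameter 1 (key)      -> DefineFixed(result, rax)
// whereas the optimizing-compiler branch keeps using
// chunk()->GetParameterStackSlot(index) spill slots.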
@@ -2212,6 +2240,7 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
return MarkAsCall(new(zone()) LStackCheck, instr);
} else {
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index b5d435b..f5f0250 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -149,6 +149,7 @@
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -251,6 +252,11 @@
void MarkAsCall() { is_call_ = true; }
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
@@ -1155,6 +1161,30 @@
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -2266,8 +2296,9 @@
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
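The spill arrays above are now sized with the compile-time maximum, while the LOsrEntry constructor in lithium-x64.cc iterates only over the runtime count. An invariant along these lines is presumably what makes that safe (assumed, not spelled out in the patch):

// Assumed relationship between the two counts:
//   Register::NumAllocatableRegisters()       <= Register::kMaxNumAllocatableRegisters
//   DoubleRegister::NumAllocatableRegisters() <= DoubleRegister::kMaxNumAllocatableRegisters
// so indexing register_spills_/double_register_spills_ with a runtime
// allocation index can never run past the statically sized arrays.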
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 4e4f2c5..8513a68 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3432,7 +3432,7 @@
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
@@ -3476,7 +3476,7 @@
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 0d8d6f2..716a591 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1414,9 +1414,9 @@
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class CompiledFrame;
};
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 683aa9d..0329966 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -3210,12 +3210,19 @@
// -- rsp[0] : return address
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode();
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ }
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
return GetCode(Code::NORMAL, factory()->empty_string());
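The hand-written fast, double, and external-array element loaders deleted further down are superseded by this dispatch onto code stubs. The selection logic reduces to the sketch below (names from the patch; the helper itself is illustrative):

// Illustrative summary of the stub selection above:
Handle<Code> SelectKeyedLoadStub(Handle<Map> receiver_map) {
  ElementsKind elements_kind = receiver_map->elements_kind();
  if (receiver_map->has_fast_elements() ||
      receiver_map->has_external_array_elements()) {
    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
    return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
  }
  return KeyedLoadDictionaryElementStub().GetCode();
}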
@@ -3457,140 +3464,6 @@
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rcx, rax);
- __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -3780,98 +3653,6 @@
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss_force_generic);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- __ SmiToInteger32(kScratchRegister, rax);
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
- Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
- FixedDoubleArray::kHeaderSize));
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- // Set the value.
- __ movq(rax, rcx);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,