Merge V8 at 3.9.24.13
Bug: 5688872
Change-Id: Id0aa8d23375030494d3189c31774059c0f5398fc
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8db54f0..a9cc2ef 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
+#include "x64/assembler-x64.h"
+
#include "cpu.h"
#include "debug.h"
#include "v8memory.h"
@@ -224,24 +226,31 @@
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
- return Assembler::kCallTargetSize;
+ return Assembler::kSpecialTargetSize;
} else {
- return Assembler::kExternalTargetSize;
+ return kPointerSize;
}
}
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (IsCodeTarget(rmode_)) {
Assembler::set_target_address_at(pc_, target);
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
} else {
Memory::Address_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
@@ -255,7 +264,7 @@
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Memory::Object_Handle_at(pc_);
@@ -277,10 +286,16 @@
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- *reinterpret_cast<Object**>(pc_) = target;
+ Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -301,11 +316,19 @@
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because the cell can never be on
+ // an evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -344,6 +367,11 @@
target;
CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
sizeof(Address));
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -368,14 +396,14 @@
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
+ visitor->VisitEmbeddedPointer(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
@@ -396,14 +424,14 @@
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 745fdae..2f0c542 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,7 +47,7 @@
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -383,7 +383,7 @@
}
#endif
- // Setup buffer pointers.
+ // Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -412,7 +412,7 @@
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
+ // Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -426,13 +426,7 @@
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
- while (delta >= 9) {
- nop(9);
- delta -= 9;
- }
- if (delta > 0) {
- nop(delta);
- }
+ Nop(delta);
}
@@ -441,6 +435,15 @@
}
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
+
+
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
@@ -499,7 +502,7 @@
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
- // Setup new buffer.
+ // Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -772,7 +775,7 @@
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- if (dst.code() > 3) {
+ if (!dst.is_byte_register()) {
// Use 64-bit mode byte registers.
emit_rex_64(dst);
}
@@ -1056,7 +1059,7 @@
void Assembler::decb(Register dst) {
EnsureSpace ensure_space(this);
- if (dst.code() > 3) {
+ if (!dst.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(dst);
}
@@ -1384,7 +1387,7 @@
void Assembler::movb(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- if (dst.code() > 3) {
+ if (!dst.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(dst, src);
} else {
@@ -1397,7 +1400,7 @@
void Assembler::movb(Register dst, Immediate imm) {
EnsureSpace ensure_space(this);
- if (dst.code() > 3) {
+ if (!dst.is_byte_register()) {
emit_rex_32(dst);
}
emit(0xB0 + dst.low_bits());
@@ -1407,7 +1410,7 @@
void Assembler::movb(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- if (src.code() > 3) {
+ if (!src.is_byte_register()) {
emit_rex_32(src, dst);
} else {
emit_optional_rex_32(src, dst);
@@ -1637,6 +1640,8 @@
void Assembler::movzxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
@@ -1763,7 +1768,7 @@
}
-void Assembler::nop(int n) {
+void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
//
@@ -1778,73 +1783,64 @@
// 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
// 00000000H] 00H
- ASSERT(1 <= n);
- ASSERT(n <= 9);
EnsureSpace ensure_space(this);
- switch (n) {
- case 1:
- emit(0x90);
- return;
- case 2:
- emit(0x66);
- emit(0x90);
- return;
- case 3:
- emit(0x0f);
- emit(0x1f);
- emit(0x00);
- return;
- case 4:
- emit(0x0f);
- emit(0x1f);
- emit(0x40);
- emit(0x00);
- return;
- case 5:
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0f);
- emit(0x1f);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 8:
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 9:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
+ while (n > 0) {
+ switch (n) {
+ case 2:
+ emit(0x66);
+ case 1:
+ emit(0x90);
+ return;
+ case 3:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x00);
+ return;
+ case 4:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x40);
+ emit(0x00);
+ return;
+ case 6:
+ emit(0x66);
+ case 5:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 7:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x80);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ default:
+ case 11:
+ emit(0x66);
+ n--;
+ case 10:
+ emit(0x66);
+ n--;
+ case 9:
+ emit(0x66);
+ n--;
+ case 8:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ n -= 8;
+ }
}
}
@@ -1937,7 +1933,7 @@
}
EnsureSpace ensure_space(this);
ASSERT(is_uint4(cc));
- if (reg.code() > 3) { // Use x64 byte registers, where different.
+ if (!reg.is_byte_register()) { // Use x64 byte registers, where different.
emit_rex_32(reg);
}
emit(0x0F);
@@ -2002,7 +1998,7 @@
emit(0x84);
emit_modrm(src, dst);
} else {
- if (dst.code() > 3 || src.code() > 3) {
+ if (!dst.is_byte_register() || !src.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(dst, src);
}
@@ -2019,7 +2015,7 @@
emit(0xA8);
emit(mask.value_); // Low byte emitted.
} else {
- if (reg.code() > 3) {
+ if (!reg.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(reg);
}
@@ -2042,7 +2038,7 @@
void Assembler::testb(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
- if (reg.code() > 3) {
+ if (!reg.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(reg, op);
} else {
@@ -2299,6 +2295,13 @@
}
+void Assembler::fptan() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xF2);
+}
+
+
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
emit(0xD9);
@@ -2306,6 +2309,27 @@
}
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ emit(0xDB);
+ emit(0xE3);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@@ -2565,7 +2589,8 @@
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(is_uint2(imm8));
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2983,7 +3008,7 @@
return;
}
}
- RelocInfo rinfo(pc_, rmode, data);
+ RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -3020,8 +3045,6 @@
return (1 << rmode_) & kApplyMask;
}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 2e373fa..60b29e6 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// A lightweight X64 Assembler.
@@ -45,22 +45,22 @@
// Utility functions
// Test whether a 64-bit value is in a specific range.
-static inline bool is_uint32(int64_t x) {
+inline bool is_uint32(int64_t x) {
static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
return static_cast<uint64_t>(x) <= kMaxUInt32;
}
-static inline bool is_int32(int64_t x) {
+inline bool is_int32(int64_t x) {
static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
return is_uint32(x - kMinInt32);
}
-static inline bool uint_is_int32(uint64_t x) {
+inline bool uint_is_int32(uint64_t x) {
static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
return x <= kMaxInt32;
}
-static inline bool is_uint32(uint64_t x) {
+inline bool is_uint32(uint64_t x) {
static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
return x <= kMaxUInt32;
}
@@ -131,6 +131,8 @@
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
+ // rax, rbx, rcx and rdx are byte registers; the rest are not.
+ bool is_byte_register() const { return code_ <= 3; }
int code() const {
ASSERT(is_valid());
return code_;
@@ -159,23 +161,41 @@
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
-const Register rax = { 0 };
-const Register rcx = { 1 };
-const Register rdx = { 2 };
-const Register rbx = { 3 };
-const Register rsp = { 4 };
-const Register rbp = { 5 };
-const Register rsi = { 6 };
-const Register rdi = { 7 };
-const Register r8 = { 8 };
-const Register r9 = { 9 };
-const Register r10 = { 10 };
-const Register r11 = { 11 };
-const Register r12 = { 12 };
-const Register r13 = { 13 };
-const Register r14 = { 14 };
-const Register r15 = { 15 };
-const Register no_reg = { -1 };
+const int kRegister_rax_Code = 0;
+const int kRegister_rcx_Code = 1;
+const int kRegister_rdx_Code = 2;
+const int kRegister_rbx_Code = 3;
+const int kRegister_rsp_Code = 4;
+const int kRegister_rbp_Code = 5;
+const int kRegister_rsi_Code = 6;
+const int kRegister_rdi_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_r11_Code = 11;
+const int kRegister_r12_Code = 12;
+const int kRegister_r13_Code = 13;
+const int kRegister_r14_Code = 14;
+const int kRegister_r15_Code = 15;
+const int kRegister_no_reg_Code = -1;
+
+const Register rax = { kRegister_rax_Code };
+const Register rcx = { kRegister_rcx_Code };
+const Register rdx = { kRegister_rdx_Code };
+const Register rbx = { kRegister_rbx_Code };
+const Register rsp = { kRegister_rsp_Code };
+const Register rbp = { kRegister_rbp_Code };
+const Register rsi = { kRegister_rsi_Code };
+const Register rdi = { kRegister_rdi_Code };
+const Register r8 = { kRegister_r8_Code };
+const Register r9 = { kRegister_r9_Code };
+const Register r10 = { kRegister_r10_Code };
+const Register r11 = { kRegister_r11_Code };
+const Register r12 = { kRegister_r12_Code };
+const Register r13 = { kRegister_r13_Code };
+const Register r14 = { kRegister_r14_Code };
+const Register r15 = { kRegister_r15_Code };
+const Register no_reg = { kRegister_no_reg_Code };
struct XMMRegister {
@@ -215,6 +235,12 @@
return names[index];
}
+ static XMMRegister from_code(int code) {
+ ASSERT(code >= 0);
+ ASSERT(code < kNumRegisters);
+ XMMRegister r = { code };
+ return r;
+ }
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
@@ -551,8 +577,8 @@
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@@ -565,8 +591,7 @@
inline Handle<Object> code_target_object_handle_at(Address pc);
// Number of bytes taken up by the branch target in the code.
- static const int kCallTargetSize = 4; // Use 32-bit displacement.
- static const int kExternalTargetSize = 8; // Use 64-bit absolute.
+ static const int kSpecialTargetSize = 4; // Use 32-bit displacement.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
@@ -630,6 +655,7 @@
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
void Align(int m);
+ void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -643,7 +669,6 @@
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
- void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -735,6 +760,10 @@
immediate_arithmetic_op_32(0x0, dst, src);
}
+ void addl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x01, src, dst);
+ }
+
void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}
@@ -1145,7 +1174,6 @@
void hlt();
void int3();
void nop();
- void nop(int n);
void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1266,7 +1294,11 @@
void fsin();
void fcos();
+ void fptan();
void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
void frndint();
@@ -1388,19 +1420,20 @@
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
- static bool IsNop(Address addr) { return *addr == 0x90; }
+ static bool IsNop(Address addr);
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
protected:
bool emit_debug_code() const { return emit_debug_code_; }
private:
byte* addr_at(int pos) { return buffer_ + pos; }
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index db06909..4e037ff 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -73,310 +73,295 @@
}
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
// -----------------------------------
- Label non_function_call;
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &non_function_call);
-
- // Jump to the function-specific construct stub.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
-
- // rdi: called object
- // rax: number of arguments
- __ bind(&non_function_call);
- // Set expected number of arguments to zero (not changing rax).
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
- // Enter a construct frame.
- __ EnterConstructFrame();
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ // Store a smi-tagged arguments count on the stack.
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
- // Push the function to invoke on the stack.
- __ push(rdi);
+ // Push the function to invoke on the stack.
+ __ push(rdi);
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ movq(kScratchRegister, debug_step_in_fp);
+ __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ j(not_equal, &rt_call);
#endif
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
+ __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // rdi: constructor
+ // rax: initial map
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(rax);
- __ push(rdi);
-
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(rdi);
- __ pop(rax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- { Label loop, entry;
- // To allow for truncation.
if (count_constructions) {
- __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ Label allocate;
+ // Decrease generous allocation count.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ decb(FieldOperand(rcx,
+ SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
+
+ __ push(rax);
+ __ push(rdi);
+
+ __ push(rdi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(rdi);
+ __ pop(rax);
+
+ __ bind(&allocate);
}
+
+ // Now allocate the JSObject on the heap.
+ __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shl(rdi, Immediate(kPointerSizeLog2));
+ // rdi: size of new object
+ __ AllocateInNewSpace(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
+ // Allocated the JSObject, now initialize the fields.
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // rdi: start of next object
+ __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ // Set extra fields in the newly allocated object.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
__ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rdi);
- __ j(less, &loop);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- { Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(below, &loop);
+ if (count_constructions) {
+ __ movzxbq(rsi,
+ FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lea(rsi,
+ Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
+ // rsi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmpq(rsi, rdi);
+ __ Assert(less_equal,
+ "Unexpected number of pre-allocated property fields.");
+ }
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+ __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ }
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ or_(rbx, Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ // Calculate total properties described by the map.
+ __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbq(rcx,
+ FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ addq(rdx, rcx);
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subq(rdx, rcx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // rbx: JSObject
+ // rdi: start of next object (will be start of FixedArray)
+ // rdx: number of elements in properties array
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rdx: number of elements
+ // rax: start of next object
+ __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+ __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // set up the map
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+
+ // Initialize the fields to undefined.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rax: start of next object
+ // rdx: number of elements
+ { Label loop, entry;
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(below, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // rbx: JSObject
+ // rdi: FixedArray
+ __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+
+ // Continue with JSObject being successfully allocated
+ // rbx: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // rbx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(rbx);
}
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ // Allocate the new receiver object using the runtime call.
+ // rdi: function (constructor)
+ __ bind(&rt_call);
+ // Must restore rdi (constructor) before calling runtime.
+ __ movq(rdi, Operand(rsp, 0));
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ movq(rbx, rax); // store result in rbx
+ // New object allocated.
+ // rbx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(rdi);
- // Continue with JSObject being successfully allocated
- // rbx: JSObject
- __ jmp(&allocated);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movq(rax, Operand(rsp, 0));
+ __ SmiToInteger32(rax, rax);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(rbx);
+ __ push(rbx);
+
+ // Set up pointer to last argument.
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ if (is_api_function) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(rax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0));
+
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+
+ // Leave construct frame.
}
- // Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
- __ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
-
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(rdi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
-
- // Setup pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
- __ LeaveConstructFrame();
-
// Remove caller arguments from the stack and return.
__ pop(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
@@ -413,104 +398,108 @@
// - Object*** argv
// (see Handle::Invoke in execution.cc).
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
+ // Open a C++ scope for the FrameScope.
+ {
+ // Platform specific argument handling. After this, the stack contains
+ // an internal frame and the pushed function and receiver, and
+ // register rax and rbx holds the argument count and argument array,
+ // while rdi holds the function pointer and rsi the context.
+
#ifdef _WIN64
- // MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
+ // MSVC parameters in:
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
+ // [rsp+0x20] : argv
- // Clear the context before we push it when entering the JS frame.
- __ Set(rsi, 0);
- __ EnterInternalFrame();
+ // Clear the context before we push it when entering the internal frame.
+ __ Set(rsi, 0);
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ // Load the function context into rsi.
+ __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
- // Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ // Push the function and the receiver onto the stack.
+ __ push(rdx);
+ __ push(r8);
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
- // Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ // Load the number of arguments and set up the pointer to the arguments.
+ __ movq(rax, r9);
+ // Load the previous frame pointer to access C argument on stack
+ __ movq(kScratchRegister, Operand(rbp, 0));
+ __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ // Load the function pointer into rdi.
+ __ movq(rdi, rdx);
#else // _WIN64
- // GCC parameters in:
- // rdi : entry (ignored)
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
+ // GCC parameters in:
+ // rdi : entry (ignored)
+ // rsi : function
+ // rdx : receiver
+ // rcx : argc
+ // r8 : argv
- __ movq(rdi, rsi);
- // rdi : function
+ __ movq(rdi, rsi);
+ // rdi : function
- // Clear the context before we push it when entering the JS frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- __ EnterInternalFrame();
+ // Clear the context before we push it when entering the internal frame.
+ __ Set(rsi, 0);
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Push the function and receiver and set up the context.
+ __ push(rdi);
+ __ push(rdx);
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ // Load the number of arguments and set up the pointer to the arguments.
+ __ movq(rax, rcx);
+ __ movq(rbx, r8);
#endif // _WIN64
- // Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
- // Current register contents:
- // rax : argc
- // rbx : argv
- // rsi : context
- // rdi : function
+ // Current stack contents:
+ // [rsp + 2 * kPointerSize ... ]: Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
+ // Current register contents:
+ // rax : argc
+ // rbx : argv
+ // rsi : context
+ // rdi : function
- // Copy arguments to the stack in a loop.
- // Register rbx points to array of pointers to handle locations.
- // Push the values of these handles.
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop);
+ // Copy arguments to the stack in a loop.
+ // Register rbx points to array of pointers to handle locations.
+ // Push the values of these handles.
+ Label loop, entry;
+ __ Set(rcx, 0); // Set loop variable to 0.
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addq(rcx, Immediate(1));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(not_equal, &loop);
- // Invoke the code.
- if (is_construct) {
- // Expects rdi to hold function pointer.
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Invoke the code.
+ if (is_construct) {
+ // Expects rdi to hold function pointer.
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(rax);
+ // Function must be in rdi.
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+ // Exit the internal frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
}
- // Exit the JS frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- __ LeaveInternalFrame();
// TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // remove receiver
+ __ ret(1 * kPointerSize); // Remove receiver.
}
@@ -526,23 +515,24 @@
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -552,23 +542,24 @@
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore function.
+ __ pop(rdi);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -579,14 +570,15 @@
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Tear down internal frame.
+ }
// Get the full codegen state from the stack and untag it.
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -623,9 +615,10 @@
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ Pushad();
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
__ Popad();
__ ret(0);
}
@@ -647,7 +640,7 @@
__ testq(rax, rax);
__ j(not_zero, &done);
__ pop(rbx);
- __ Push(FACTORY->undefined_value());
+ __ Push(masm->isolate()->factory()->undefined_value());
__ push(rbx);
__ incq(rax);
__ bind(&done);
@@ -695,18 +688,21 @@
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- __ LeaveInternalFrame();
+ __ pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+
// Restore the function to rdi.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ jmp(&patch_receiver, Label::kNear);
@@ -807,168 +803,166 @@
// rsp+8: arguments
// rsp+16: receiver ("this")
// rsp+24: function
- __ EnterInternalFrame();
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // rbp: Old base pointer
+ // rbp[1]: return address
+ // rbp[2]: function arguments
+ // rbp[3]: receiver
+ // rbp[4]: function
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(Operand(rbp, kArgumentsOffset));
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movq(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subq(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
+ // Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(rax); // limit
+ __ push(Immediate(0)); // index
- // Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
+ // Get the receiver.
+ __ movq(rbx, Operand(rbp, kReceiverOffset));
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &push_receiver);
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &push_receiver);
- // Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Change context eagerly to get the right global object if necessary.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &push_receiver);
- // Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &push_receiver);
+ // Do not transform the receiver for natives.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ j(equal, &use_global_receiver);
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &use_global_receiver);
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &push_receiver);
+ // If given receiver is already a JavaScript object then there's no
+ // reason for converting it.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &push_receiver);
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver, Label::kNear);
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ jmp(&push_receiver, Label::kNear);
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(rbx);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
- // Push the nth argument.
- __ push(rax);
+ // Push the nth argument.
+ __ push(rax);
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ // Update the index on the stack and in register rax.
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ __ movq(Operand(rbp, kIndexOffset), rax);
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
+ __ bind(&entry);
+ __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ j(not_equal, &loop);
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(rax);
+ __ SmiToInteger32(rax, rax);
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ frame_scope.GenerateLeaveFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(rdi); // add function proxy as last argument
+ __ incq(rax);
+ __ Set(rbx, 0);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
- __ LeaveInternalFrame();
+ // Leave internal frame.
+ }
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
}
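// Illustrative sketch of the argument-copying loop above (plain C++, names
// hypothetical, not part of the patch): an index kept in a frame slot is
// advanced until it reaches the precomputed limit, and each element read
// through the keyed-load path is pushed as an outgoing argument.
#include <cstddef>
#include <vector>

void CopyApplyArguments(const std::vector<int>& arguments,
                        std::vector<int>* outgoing) {
  std::size_t index = 0;                       // kIndexOffset slot.
  const std::size_t limit = arguments.size();  // kLimitOffset slot.
  while (index != limit) {                     // cmpq / j(not_equal, &loop).
    outgoing->push_back(arguments[index]);     // keyed load IC + push(rax).
    ++index;                                   // SmiAddConstant(rax, rax, 1).
  }
}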
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
// Allocate an empty JSArray. The allocated array is put into the result
// register. If the parameter initial_capacity is larger than zero an elements
// backing store is allocated with this size and filled with the hole values.
@@ -979,13 +973,11 @@
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity >= 0);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
- // Load the initial map from the array function.
- __ movq(scratch1, FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1005,9 +997,10 @@
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
+ Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- FACTORY->empty_fixed_array());
+ factory->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
@@ -1015,7 +1008,7 @@
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
- FACTORY->empty_fixed_array());
+ factory->empty_fixed_array());
return;
}
@@ -1032,15 +1025,14 @@
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- FACTORY->fixed_array_map());
+ factory->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
- ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, FACTORY->the_hole_value());
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@@ -1051,13 +1043,17 @@
}
} else {
Label loop, entry;
+ __ movq(scratch2, Immediate(initial_capacity));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(scratch1, 0), scratch3);
- __ addq(scratch1, Immediate(kPointerSize));
+ __ movq(FieldOperand(scratch1,
+ scratch2,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ scratch3);
__ bind(&entry);
- __ cmpq(scratch1, scratch2);
- __ j(below, &loop);
+ __ decq(scratch2);
+ __ j(not_sign, &loop);
}
}
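// Illustrative sketch of the hole-filling code above (plain C++, hypothetical
// names): small capacities are filled with unrolled stores, larger ones with
// an index that counts down to zero, mirroring decq / j(not_sign, &loop).
#include <cstddef>

void FillWithHole(void** elements, std::size_t capacity, void* the_hole) {
  const std::size_t kLoopUnfoldLimit = 4;
  if (capacity <= kLoopUnfoldLimit) {
    for (std::size_t i = 0; i < capacity; ++i) {
      elements[i] = the_hole;  // emitted as straight-line stores
    }
  } else {
    for (std::size_t i = capacity; i-- > 0;) {
      elements[i] = the_hole;  // store at kHeaderSize + i * kPointerSize
    }
  }
}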
@@ -1073,38 +1069,22 @@
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
+ __ LoadInitialArrayMap(array_function, scratch, elements_array);
- // Load the initial map from the array function.
- __ movq(elements_array,
- FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check whether an empty sized array is requested.
- __ testq(array_size, array_size);
- __ j(not_zero, ¬_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
+ if (FLAG_debug_code) { // Assert that array size is not zero.
+ __ testq(array_size, array_size);
+ __ Assert(not_zero, "array size is unexpectedly 0");
+ }
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
- __ bind(¬_empty);
SmiIndex index =
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
@@ -1122,9 +1102,9 @@
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
- __ bind(&allocated);
+ Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, FACTORY->empty_fixed_array());
+ __ Move(elements_array, factory->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1143,16 +1123,7 @@
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- FACTORY->fixed_array_map());
- Label not_empty_2, fill_array;
- __ SmiTest(array_size);
- __ j(not_zero, ¬_empty_2);
- // Length of the FixedArray is the number of pre-allocated elements even
- // though the actual JSArray has length 0.
- __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
- Smi::FromInt(kPreallocatedArrayElements));
- __ jmp(&fill_array);
- __ bind(¬_empty_2);
+ factory->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -1161,10 +1132,9 @@
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
- __ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
- __ Move(scratch, FACTORY->the_hole_value());
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
@@ -1193,13 +1163,15 @@
// Both registers are preserved by this code so no need to differentiate between
// a construct call and a normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
- Label argc_one_or_more, argc_two_or_more;
+ Label* call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
+ has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments.
__ testq(rax, rax);
__ j(not_zero, &argc_one_or_more);
+ __ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
rdi,
@@ -1207,7 +1179,6 @@
rcx,
rdx,
r8,
- kPreallocatedArrayElements,
call_generic_code);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->array_function_native(), 1);
@@ -1220,6 +1191,16 @@
__ cmpq(rax, Immediate(1));
__ j(not_equal, &argc_two_or_more);
__ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+
+ __ SmiTest(rdx);
+ __ j(not_zero, ¬_empty_array);
+ __ pop(r8); // Adjust stack.
+ __ Drop(1);
+ __ push(r8);
+ __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
+ __ jmp(&empty_array);
+
+ __ bind(¬_empty_array);
__ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
// Handle construction of an empty array of a certain size. Bail out if size
@@ -1289,8 +1270,11 @@
__ movq(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), kScratchRegister);
+ __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(r8, &has_non_smi_element);
+ }
+ __ movq(Operand(rdx, 0), r8);
__ addq(rdx, Immediate(kPointerSize));
__ bind(&entry);
__ decq(rcx);
@@ -1301,11 +1285,81 @@
// rbx: JSArray
// esp[0]: return address
// esp[8]: last argument
+ __ bind(&finish);
__ pop(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
__ movq(rax, rbx);
__ ret(0);
+
+ __ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(r8,
+ masm->isolate()->factory()->heap_number_map(),
+ ¬_double,
+ DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
+ __ UndoAllocationInNewSpace(rbx);
+ __ jmp(call_generic_code);
+
+ __ bind(¬_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // rbx: JSArray
+ __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r11,
+ kScratchRegister,
+ &cant_transition_map);
+
+ __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
+ __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Finish the array initialization loop.
+ Label loop2;
+ __ bind(&loop2);
+ __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(Operand(rdx, 0), r8);
+ __ addq(rdx, Immediate(kPointerSize));
+ __ decq(rcx);
+ __ j(greater_equal, &loop2);
+ __ jmp(&finish);
+}
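// Illustrative sketch of the element classification above (the enum and
// helper are hypothetical, not V8 API): while copying constructor arguments
// into a smi-only backing store, the first non-smi value either bails out to
// the generic code (heap numbers, or a failed map transition) or forces a
// transition from FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS, after which the
// remaining arguments are copied without further checks.
enum class ElementsKind { kFastSmiOnly, kFast, kNeedsGenericCode };

ElementsKind ClassifyElement(bool is_smi, bool is_heap_number,
                             ElementsKind current) {
  if (is_smi) return current;                                  // Smis always fit.
  if (is_heap_number) return ElementsKind::kNeedsGenericCode;  // bail out.
  return ElementsKind::kFast;                                  // transition the map.
}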
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // The Smi check below will catch both a NULL pointer and a Smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ __ Check(not_smi, "Unexpected initial map for InternalArray function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Check(equal, "Unexpected initial map for InternalArray function");
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -1378,9 +1432,130 @@
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // TODO(849): implement custom construct stub.
- // Generate a copy of the generic stub for now.
- Generate_JSConstructStubGeneric(masm);
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : constructor function
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1);
+
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
+ __ cmpq(rdi, rcx);
+ __ Assert(equal, "Unexpected String function");
+ }
+
+ // Load the first argument into rax and get rid of the rest
+ // (including the receiver).
+ Label no_arguments;
+ __ testq(rax, rax);
+ __ j(zero, &no_arguments);
+ __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ push(rcx);
+ __ movq(rax, rbx);
+
+ // Lookup the argument in the number to string cache.
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ rax, // Input.
+ rbx, // Result.
+ rcx, // Scratch 1.
+ rdx, // Scratch 2.
+ false, // Input is known to be smi?
+ ¬_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1);
+ __ bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- rbx : argument converted to string
+ // -- rdi : constructor function
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into rax.
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ rax, // Result.
+ rcx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(rdi, rcx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
+ Immediate(JSValue::kSize >> kPointerSizeLog2));
+ __ Assert(equal, "Unexpected string wrapper instance size");
+ __ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
+ __ Assert(equal, "Unexpected unused properties of string wrapper");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+
+ // Set properties and elements.
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+ // Set the value.
+ __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(¬_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ movq(rbx, rax);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into rbx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rdi); // Preserve the function.
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(rdi);
+ }
+ __ movq(rbx, rax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into rbx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ LoadRoot(rbx, Heap::kEmptyStringRootIndex);
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, kPointerSize));
+ __ push(rcx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rbx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ ret(0);
}
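// Illustrative sketch of what the stub above implements for `new String(x)`
// (hypothetical types, plain C++): the argument is converted to a string --
// through the number-to-string cache when it hits, through the TO_STRING
// builtin otherwise -- and a wrapper object holding that string is returned;
// with no arguments the wrapper holds the empty string.
#include <string>

struct StringWrapper {  // stand-in for a JSValue with kValueOffset.
  std::string value;
};

StringWrapper NewStringObject(const std::string* argument) {
  StringWrapper wrapper;
  wrapper.value = (argument == nullptr) ? std::string() : *argument;
  return wrapper;
}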
@@ -1489,6 +1664,9 @@
__ bind(&invoke);
__ call(rdx);
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
@@ -1520,10 +1698,11 @@
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1541,7 +1720,9 @@
StackCheckStub stub;
__ TailCallStub(&stub);
- __ Abort("Unreachable code: returned from tail call.");
+ if (FLAG_debug_code) {
+ __ Abort("Unreachable code: returned from tail call.");
+ }
__ bind(&ok);
__ ret(0);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 6499ea0..2845039 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -68,9 +68,9 @@
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -124,12 +124,12 @@
// Get the function from the stack.
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ Set(rbx, 0); // Set to NULL.
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
__ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
@@ -155,6 +155,131 @@
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + (1 * kPointerSize)]: function
+ // [rsp + (2 * kPointerSize)]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Get the serialized scope info from the stack.
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+ // Set up the object header.
+ __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+ // If this block context is nested in the global context we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmpq(rcx, Immediate(0));
+ __ Assert(equal, message);
+ }
+ __ movq(rcx, GlobalObjectOperand());
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Set up the fixed slots.
+ __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
+ __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
+ __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
+
+ // Copy the global object from the previous context.
+ __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(2 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
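// Illustrative sketch of the block-context layout built above (the slot
// indices shown are assumptions for this sketch): a fixed array of
// MIN_CONTEXT_SLOTS + slots entries whose fixed slots hold the closure,
// previous context, extension (scope info) and global object, with every
// remaining slot initialized to the hole value.
#include <vector>

std::vector<void*> NewBlockContext(void* closure, void* previous,
                                   void* scope_info, void* global_object,
                                   void* the_hole, int slots) {
  const int kMinContextSlots = 4;  // CLOSURE, PREVIOUS, EXTENSION, GLOBAL.
  std::vector<void*> context(kMinContextSlots + slots, the_hole);
  context[0] = closure;        // Context::CLOSURE_INDEX
  context[1] = previous;       // Context::PREVIOUS_INDEX
  context[2] = scope_info;     // Context::EXTENSION_INDEX
  context[3] = global_object;  // Context::GLOBAL_INDEX
  return context;
}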
+
+
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ //
+ // rcx: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = 0;
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+ int size = JSArray::kSize + elements_size;
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ } else {
+ ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+ int i;
+ for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ while (i < elements_size) {
+ __ movsd(xmm0, FieldOperand(rcx, i));
+ __ movsd(FieldOperand(rdx, i), xmm0);
+ i += kDoubleSize;
+ }
+ ASSERT(i == elements_size);
+ }
+ }
+}
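// Illustrative sketch of the copy strategy above (plain C++, byte-wise for
// clarity): the JSArray header and tagged FixedArray elements are copied one
// pointer-sized word at a time, while FixedDoubleArray payloads past the
// header are copied as raw 64-bit doubles, matching the movsd loop.
#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyBoilerplateElements(const std::uint8_t* src, std::uint8_t* dst,
                             std::size_t header_size, std::size_t total_size,
                             bool double_elements) {
  std::size_t i = 0;
  for (; i < header_size; i += sizeof(void*)) {
    std::memcpy(dst + i, src + i, sizeof(void*));  // header: word copy
  }
  const std::size_t step = double_elements ? sizeof(double) : sizeof(void*);
  for (; i < total_size; i += step) {
    std::memcpy(dst + i, src + i, step);           // payload copy
  }
}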
+
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -162,29 +287,54 @@
// [rsp + (2 * kPointerSize)]: literal index.
// [rsp + (3 * kPointerSize)]: literals array.
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
// Load boilerplate object into rcx and check if we need to create a
// boilerplate.
- Label slow_case;
__ movq(rcx, Operand(rsp, 3 * kPointerSize));
__ movq(rax, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ movq(rcx,
FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
__ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ Label slow_case;
__ j(equal, &slow_case);
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ // rcx is boilerplate object.
+ Factory* factory = masm->isolate()->factory();
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ factory->fixed_cow_array_map());
+ __ j(not_equal, &check_fast_elements);
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&check_fast_elements);
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ factory->fixed_array_map());
+ __ j(not_equal, &double_elements);
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
+
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
@@ -196,33 +346,7 @@
__ pop(rcx);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- }
-
- // Return and remove the on-stack parameters.
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
__ ret(3 * kPointerSize);
__ bind(&slow_case);
@@ -230,9 +354,54 @@
}
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: object literal flags.
+ // [rsp + (2 * kPointerSize)]: constant properties.
+ // [rsp + (3 * kPointerSize)]: literal index.
+ // [rsp + (4 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into rcx and check if we need to create a

+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
+ __ j(not_equal, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(4 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
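// Illustrative sketch of the guard above (helper name hypothetical): the
// boilerplate is only cloned inline when its instance size is statically
// known, i.e. the map's recorded instance size (kept in words) matches
// JSObject::kHeaderSize + length * kPointerSize; otherwise the stub tail
// calls the runtime.
bool CanCloneObjectLiteralInline(int map_instance_size_in_words,
                                 int header_size_in_bytes,
                                 int in_object_properties,
                                 int pointer_size_in_bytes) {
  const int expected_bytes =
      header_size_in_bytes + in_object_properties * pointer_size_in_bytes;
  return map_instance_size_in_words == expected_bytes / pointer_size_in_bytes;
}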
+
+
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
Label patch;
const Register argument = rax;
const Register map = rdx;
@@ -328,6 +497,25 @@
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ __ PushCallerSaved(save_doubles_);
+ const int argument_count = 1;
+ __ PrepareCallCFunction(argument_count);
+#ifdef _WIN64
+ __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+ __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ __ PopCallerSaved(save_doubles_);
+ __ ret(0);
+}
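// Illustrative sketch (hypothetical function-pointer signature): the only
// platform split above is the register carrying the first C argument --
// rcx on Win64, rdi on the System V AMD64 ABI -- which is where the isolate
// address must live before CallCFunction transfers control.
void CallOverflowFunction(void (*store_buffer_overflow)(void* isolate),
                          void* isolate_address) {
  // A regular C call: the compiler places isolate_address in rdi or rcx
  // depending on the target ABI, which is what the stub arranges by hand.
  store_buffer_overflow(isolate_address);
}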
+
+
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
@@ -622,12 +810,13 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rax);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rcx, rax);
+ __ pop(rax);
+ }
__ bind(&heapnumber_allocated);
// rcx: allocated 'empty' number
@@ -751,6 +940,10 @@
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -1414,6 +1607,8 @@
__ cmpq(rbx, Operand(rcx, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->transcendental_cache_hit(), 1);
__ movq(rax, Operand(rcx, 2 * kIntSize));
if (tagged) {
__ fstp(0); // Clear FPU stack.
@@ -1424,6 +1619,7 @@
}
__ bind(&cache_miss);
+ __ IncrementCounter(counters->transcendental_cache_miss(), 1);
// Update cache with new value.
if (tagged) {
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
@@ -1432,7 +1628,7 @@
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
}
- GenerateOperation(masm);
+ GenerateOperation(masm, type_);
__ movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
@@ -1447,17 +1643,18 @@
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), xmm1);
__ fld_d(Operand(rsp, 0));
- GenerateOperation(masm);
+ GenerateOperation(masm, type_);
__ fstp_d(Operand(rsp, 0));
__ movsd(xmm1, Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Allocate an unused object bigger than a HeapNumber.
+ __ Push(Smi::FromInt(2 * kDoubleSize));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
@@ -1473,10 +1670,11 @@
__ bind(&runtime_call);
__ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -1488,6 +1686,7 @@
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -1496,14 +1695,17 @@
}
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+void TranscendentalCacheStub::GenerateOperation(
+ MacroAssembler* masm, TranscendentalCache::Type type) {
// Registers:
// rax: Newly allocated HeapNumber, which must be preserved.
// rbx: Bits of input double. Must be preserved.
// rcx: Pointer to cache entry. Must be preserved.
// st(0): Input double
Label done;
- if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+ if (type == TranscendentalCache::SIN ||
+ type == TranscendentalCache::COS ||
+ type == TranscendentalCache::TAN) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
@@ -1524,8 +1726,12 @@
__ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
- __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
- __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ // NaN is represented by 0x7ff8000000000000.
+ __ subq(rsp, Immediate(kPointerSize));
+ __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
+ __ movl(Operand(rsp, 0), Immediate(0x00000000));
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
__ jmp(&done);
__ bind(&non_nan_result);
@@ -1566,19 +1772,25 @@
// FPU Stack: input % 2*pi
__ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
__ bind(&in_range);
- switch (type_) {
+ switch (type) {
case TranscendentalCache::SIN:
__ fsin();
break;
case TranscendentalCache::COS:
__ fcos();
break;
+ case TranscendentalCache::TAN:
+ // FPTAN calculates the tangent of st(0) and pushes 1.0 onto the
+ // FP register stack.
+ __ fptan();
+ __ fstp(0); // Pop FP register stack.
+ break;
default:
UNREACHABLE();
}
__ bind(&done);
} else {
- ASSERT(type_ == TranscendentalCache::LOG);
+ ASSERT(type == TranscendentalCache::LOG);
__ fldln2();
__ fxch();
__ fyl2x();
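// Illustrative sketch of two details above (plain C++): the LOG path uses
// fldln2/fyl2x, i.e. ln(x) = ln(2) * log2(x), and the NaN result for
// infinite or NaN inputs is materialized from the canonical quiet-NaN bit
// pattern 0x7FF8000000000000 rather than loaded from a heap root.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

double NaturalLogViaLog2(double x) {
  return std::log(2.0) * std::log2(x);  // fldln2; fxch; fyl2x
}

double CanonicalQuietNaN() {
  const std::uint64_t kQuietNaNBits = 0x7FF8000000000000ULL;
  double result;
  std::memcpy(&result, &kQuietNaNBits, sizeof(result));
  assert(std::isnan(result));
  return result;
}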
@@ -1784,152 +1996,259 @@
void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // rdx = base
- // rax = exponent
- // rcx = temporary, result
+ // Choose register conforming to calling convention (when bailing out).
+#ifdef _WIN64
+ const Register exponent = rdx;
+#else
+ const Register exponent = rdi;
+#endif
+ const Register base = rax;
+ const Register scratch = rcx;
+ const XMMRegister double_result = xmm3;
+ const XMMRegister double_base = xmm2;
+ const XMMRegister double_exponent = xmm1;
+ const XMMRegister double_scratch = xmm4;
- Label allocate_return, call_runtime;
+ Label call_runtime, done, exponent_not_smi, int_exponent;
- // Load input parameters.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ // Save 1 in double_result - we need this several times later on.
+ __ movq(scratch, Immediate(1));
+ __ cvtlsi2sd(double_result, scratch);
- // Save 1 in xmm3 - we need this several times later on.
- __ Set(rcx, 1);
- __ cvtlsi2sd(xmm3, rcx);
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack.
+ __ movq(base, Operand(rsp, 2 * kPointerSize));
+ __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+ __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(rax, &exponent_nonsmi);
- __ JumpIfNotSmi(rdx, &base_nonsmi);
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent, Label::kNear);
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&powi);
- // Exponent is a smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
+ __ bind(&base_is_smi);
+ __ SmiToInteger32(base, base);
+ __ cvtlsi2sd(double_base, base);
+ __ bind(&unpack_exponent);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiToInteger32(exponent, exponent);
+ __ jmp(&int_exponent);
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiToInteger32(rax, rax);
+ __ bind(&exponent_not_smi);
+ __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiToInteger32(exponent, exponent);
+ __ jmp(&int_exponent);
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movq(rdx, rax);
+ __ bind(&exponent_not_smi);
+ __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label fast_power;
+ // Detect integer exponents stored as double.
+ __ cvttsd2si(exponent, double_exponent);
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmpl(exponent, Immediate(0x80000000u));
+ __ j(equal, &call_runtime);
+ __ cvtlsi2sd(double_scratch, exponent);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_exponent, double_scratch);
+ __ j(equal, &int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label continue_sqrt, continue_rsqrt, not_plus_half;
+ // Test for 0.5.
+ // Load double_scratch with 0.5.
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, ¬_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, double-precision -Infinity has the highest
+ // 12 bits set and the lowest 52 bits cleared.
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ __ ucomisd(double_scratch, double_base);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_sqrt, Label::kNear);
+ __ j(carry, &continue_sqrt, Label::kNear);
+
+ // Set result to Infinity in the special case.
+ __ xorps(double_result, double_result);
+ __ subsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&continue_sqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_scratch, double_scratch);
+ __ addsd(double_scratch, double_base); // Convert -0 to 0.
+ __ sqrtsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ // Test for -0.5.
+ __ bind(¬_plus_half);
+ // Load double_scratch with -0.5 by subtracting 1.
+ __ subsd(double_scratch, double_result);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &fast_power, Label::kNear);
+
+ // Calculates reciprocal of square root of base. Check for the special
+ // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, double-precision -Infinity has the highest
+ // 12 bits set and the lowest 52 bits cleared.
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ __ ucomisd(double_scratch, double_base);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_rsqrt, Label::kNear);
+ __ j(carry, &continue_rsqrt, Label::kNear);
+
+ // Set result to 0 in the special case.
+ __ xorps(double_result, double_result);
+ __ jmp(&done);
+
+ __ bind(&continue_rsqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_exponent, double_exponent);
+ __ addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_exponent, double_exponent);
+ __ divsd(double_result, double_exponent);
+ __ jmp(&done);
+ }
+
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), double_exponent);
+ __ fld_d(Operand(rsp, 0)); // E
+ __ movsd(Operand(rsp, 0), double_base);
+ __ fld_d(Operand(rsp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(double_result, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&done);
+
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+ const XMMRegister double_scratch2 = double_exponent;
+ // Back up exponent as we need to check if exponent is negative later.
+ __ movq(scratch, exponent); // Back up exponent.
+ __ movsd(double_scratch, double_base); // Back up base.
+ __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg;
- __ cmpl(rax, Immediate(0));
- __ j(greater_equal, &no_neg, Label::kNear);
- __ negl(rax);
+ Label no_neg, while_true, no_multiply;
+ __ testl(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ negl(scratch);
__ bind(&no_neg);
- // Load xmm1 with 1.
- __ movaps(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
__ bind(&while_true);
- __ shrl(rax, Immediate(1));
+ __ shrl(scratch, Immediate(1));
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(xmm1, xmm0);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
+
+ __ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
- // Base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ testl(rdx, rdx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ divsd(xmm3, xmm1);
- __ movaps(xmm1, xmm3);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
+ // If the exponent is negative, return 1/result.
+ __ testl(exponent, exponent);
+ __ j(greater, &done);
+ __ divsd(double_scratch2, double_result);
+ __ movsd(double_result, double_scratch2);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ xorps(double_scratch2, double_scratch2);
+ __ ucomisd(double_scratch2, double_result);
+ // double_exponent aliased as double_scratch2 has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // input was a smi. We reset it with exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtlsi2sd(double_exponent, exponent);
- __ jmp(&allocate_return);
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
- // Exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in rax.
+ __ bind(&done);
+ __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ __ bind(&call_runtime);
+ // Move base to the correct argument register. Exponent is already in xmm1.
+ __ movsd(xmm0, double_base);
+ ASSERT(double_exponent.is(xmm1));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 2);
+ }
+ // Return value is in xmm0.
+ __ movsd(double_result, xmm0);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- Label base_not_smi, handle_special_cases;
- __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&handle_special_cases, Label::kNear);
-
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ andl(rcx, Immediate(HeapNumber::kExponentMask));
- __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
- __ movq(xmm2, rcx);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, ¬_minus_half, Label::kNear);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movaps(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(¬_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0); // Convert -0 to 0.
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(rcx, rax, &call_runtime);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
- __ movq(rax, rcx);
- __ ret(2 * kPointerSize);
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(0);
+ }
}
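// Illustrative sketch of the two evaluation strategies above (plain C++,
// helper names are not V8's): the FPU fast path computes base^exp as
// 2^(exp * log2(base)), splitting X = exp * log2(base) into rnd(X) and
// X - rnd(X) so that f2xm1 (valid only for |x| < 1) and fscale can be
// combined; the integer path is exponentiation by squaring driven by the
// low bit of the exponent, with a final reciprocal for negative exponents.
#include <cmath>

double PowFpuStyle(double base, double exponent) {
  const double x = exponent * std::log2(base);  // fyl2x
  const double rounded = std::nearbyint(x);     // frndint
  const double fraction = x - rounded;          // fsub / fxch
  return ((std::exp2(fraction) - 1.0) + 1.0)    // f2xm1; fld1; faddp
         * std::exp2(rounded);                  // fscale
}

double PowIntegerExponent(double base, int exponent) {
  const long long e = exponent;
  unsigned long long bits =
      (e < 0) ? static_cast<unsigned long long>(-e)
              : static_cast<unsigned long long>(e);
  double result = 1.0;    // double_result starts at 1.
  double scratch = base;  // double_scratch holds the running square.
  while (bits != 0) {
    if (bits & 1) result *= scratch;  // multiply when the shifted-out bit is set
    scratch *= scratch;               // mulsd double_scratch, double_scratch
    bits >>= 1;                       // shrl scratch, 1
  }
  return (exponent < 0) ? 1.0 / result : result;
}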
@@ -2043,6 +2362,7 @@
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
+ __ xor_(r8, r8);
__ testq(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
@@ -2085,7 +2405,7 @@
__ movq(FieldOperand(rax, i), rdx);
}
- // Setup the callee in-object property.
+ // Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ movq(rdx, Operand(rsp, 3 * kPointerSize));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
@@ -2100,7 +2420,7 @@
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
- // Setup the elements pointer in the allocated arguments object.
+ // Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
@@ -2136,16 +2456,13 @@
Label parameters_loop, parameters_test;
// Load tagged parameter count into r9.
- __ movq(r9, Operand(rsp, 1 * kPointerSize));
+ __ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 3 * kPointerSize));
+ __ addq(r8, Operand(rsp, 1 * kPointerSize));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
- __ SmiToInteger64(kScratchRegister, r9);
- __ lea(rdi, Operand(rdi, kScratchRegister,
- times_pointer_size,
- kParameterMapHeaderSize));
+ __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -2181,9 +2498,8 @@
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Untag rcx and r8 for the loop below.
+ // Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ SmiToInteger64(r8, r8);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
__ subq(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
@@ -2307,7 +2623,7 @@
// Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Setup the elements pointer in the allocated arguments object and
+ // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
@@ -2346,10 +2662,6 @@
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// rsp[0]: return address
@@ -2455,26 +2767,40 @@
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ __ andb(rbx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask |
+ kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ascii string.
- __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ // Any other flat string must be a flat ASCII string. None of the following
+ // string type tests will succeed if subject is not a string or a short
+ // external string.
+ __ andb(rbx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
__ j(zero, &seq_ascii_string, Label::kNear);
+ // rbx: whether subject is a string and if yes, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmpq(rbx, Immediate(kExternalStringTag));
__ j(less, &cons_string, Label::kNear);
- __ j(equal, &runtime);
+ __ j(equal, &external_string);
+
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
+ __ j(not_zero, &runtime);
// String is sliced.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
@@ -2498,16 +2824,16 @@
Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be ascii.
+ // Any other flat string must be sequential ASCII or external.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
+ __ j(not_zero, &external_string);
__ bind(&seq_ascii_string);
- // rdi: subject string (sequential ascii)
+ // rdi: subject string (sequential ASCII)
// rax: RegExp data (FixedArray)
__ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is ascii.
+ __ Set(rcx, 1); // Type is ASCII.
__ jmp(&check_code, Label::kNear);
__ bind(&seq_two_byte_string);
@@ -2523,7 +2849,7 @@
__ JumpIfSmi(r11, &runtime);
// rdi: subject string
- // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
+ // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
@@ -2531,7 +2857,7 @@
// rdi: subject string
// rbx: previous index
- // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
@@ -2588,7 +2914,7 @@
// Keep track on aliasing between argX defined above and the registers used.
// rdi: subject string
// rbx: previous index
- // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
// r11: code
// r14: slice offset
// r15: original subject string
@@ -2670,12 +2996,18 @@
// Store last subject and last input.
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ RecordWriteField(rbx,
+ RegExpImpl::kLastSubjectOffset,
+ rax,
+ rdi,
+ kDontSaveFPRegs);
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+ __ RecordWriteField(rbx,
+ RegExpImpl::kLastInputOffset,
+ rax,
+ rdi,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
__ LoadAddress(rcx,
@@ -2727,7 +3059,28 @@
__ Throw(rax);
__ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, rax);
+ __ ThrowUncatchable(rax);
+
+ // External string. Short external strings have already been ruled out.
+ // rdi: subject string (expected to be external)
+ // rbx: scratch
+ __ bind(&external_string);
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ testb(rbx, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, "external string expected, but not found");
+ }
+ __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ testb(rbx, Immediate(kStringEncodingMask));
+ __ j(not_zero, &seq_ascii_string);
+ __ jmp(&seq_two_byte_string);
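// The external-string block above relies on a pointer-bias trick: once the
// resource data pointer is loaded, subtracting the sequential-string header
// size (minus the heap-object tag) lets the existing sequential-string
// indexing code read the characters unchanged. A minimal standalone C++
// sketch (not V8 code) of that idea follows; the constants and layout are
// illustrative assumptions, not V8's actual object layout.
#include <cassert>
#include <stdint.h>

namespace sketch {

const intptr_t kHeapObjectTag = 1;   // assumed pointer tag
const intptr_t kSeqHeaderSize = 24;  // assumed header size (same for 1/2-byte)

// Reads a character as if 'biased_base' were a tagged sequential string.
inline uint8_t CharAt(const uint8_t* biased_base, intptr_t index) {
  return biased_base[kSeqHeaderSize - kHeapObjectTag + index];
}

}  // namespace sketch

int main() {
  using namespace sketch;
  // Pad the front of the buffer so the biased pointer stays in bounds.
  uint8_t buffer[64] = { 0 };
  uint8_t* external_data = buffer + 32;
  const char text[] = "regexp subject";
  for (int i = 0; text[i] != '\0'; ++i) external_data[i] = text[i];
  // Bias the data pointer; sequential-style indexing now works on it.
  const uint8_t* biased = external_data - (kSeqHeaderSize - kHeapObjectTag);
  assert(CharAt(biased, 0) == 'r');
  assert(CharAt(biased, 7) == 's');
  return 0;
}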
// Do the runtime call to execute the regexp.
__ bind(&runtime);
@@ -3132,7 +3485,7 @@
__ JumpIfNotBothSequentialAsciiStrings(
rdx, rax, rcx, rbx, &check_unequal_objects);
- // Inline comparison of ascii strings.
+ // Inline comparison of ASCII strings.
if (cc_ == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
@@ -3231,7 +3584,52 @@
}
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // rbx : cache cell for call target
+ // rdi : the function to call
+ Isolate* isolate = masm->isolate();
+ Label initialize, done;
+
+ // Load the cache state into rcx.
+ __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmpq(rcx, rdi);
+ __ j(equal, &done, Label::kNear);
+ __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ j(equal, &done, Label::kNear);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ j(equal, &initialize, Label::kNear);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ jmp(&done, Label::kNear);
+
+ // An uninitialized cache is patched with the function.
+ __ bind(&initialize);
+ __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
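// The helper above implements a three-state cache. A minimal standalone C++
// sketch (not V8 code) of the same transitions: a cell starts uninitialized,
// the first callee makes it monomorphic, and any later miss collapses it to a
// megamorphic sentinel that never changes again. The sentinel objects and
// types below are illustrative assumptions.
#include <cassert>

namespace sketch {

typedef const void* Function;

static const char kUninitializedMarker = 0;
static const char kMegamorphicMarker = 0;
const Function kUninitializedSentinel = &kUninitializedMarker;
const Function kMegamorphicSentinel = &kMegamorphicMarker;

struct CallTargetCell {
  Function value;
  CallTargetCell() : value(kUninitializedSentinel) {}
};

// Mirrors the control flow of GenerateRecordCallTarget: a monomorphic hit or
// an already-megamorphic cell leaves the state untouched.
void RecordCallTarget(CallTargetCell* cell, Function callee) {
  if (cell->value == callee || cell->value == kMegamorphicSentinel) return;
  if (cell->value == kUninitializedSentinel) {
    cell->value = callee;                 // uninitialized -> monomorphic
  } else {
    cell->value = kMegamorphicSentinel;   // monomorphic miss -> megamorphic
  }
}

}  // namespace sketch

int main() {
  using namespace sketch;
  int f = 0, g = 0;  // stand-ins for two distinct functions
  CallTargetCell cell;
  RecordCallTarget(&cell, &f);
  assert(cell.value == &f);                    // monomorphic
  RecordCallTarget(&cell, &f);
  assert(cell.value == &f);                    // hit: unchanged
  RecordCallTarget(&cell, &g);
  assert(cell.value == kMegamorphicSentinel);  // miss: megamorphic
  return 0;
}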
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // rdi : the function to call
+ // rbx : cache cell for call target
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -3252,10 +3650,6 @@
__ bind(&call);
}
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
// Check that the function really is a JavaScript function.
__ JumpIfSmi(rdi, &non_function);
// Goto slow case if we do not have a function.
@@ -3292,7 +3686,7 @@
__ push(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_FUNCTION);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
@@ -3314,14 +3708,80 @@
}
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // rax : number of arguments
+ // rbx : cache cell for call target
+ // rdi : constructor function
+ Label slow, non_function_call;
+
+ // Check that function is not a smi.
+ __ JumpIfSmi(rdi, &non_function_call);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
+ __ jmp(rbx);
+
+ // rdi: called object
+ // rax: number of arguments
+ // rcx: object map
+ Label do_call;
+ __ bind(&slow);
+ __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function_call);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing rax).
+ __ Set(rbx, 0);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return false;
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Throw exception in eax.
- __ Throw(rax);
+bool CEntryStub::IsPregenerated() {
+#ifdef _WIN64
+ return result_size_ == 1;
+#else
+ return true;
+#endif
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ // It is important that the store buffer overflow stubs are generated first.
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ save_doubles.GetCode()->set_is_pregenerated(true);
}
@@ -3465,12 +3925,6 @@
}
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, rax);
-}
-
-
void CEntryStub::Generate(MacroAssembler* masm) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
@@ -3534,22 +3988,34 @@
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ Set(rax, static_cast<int64_t>(false));
+ __ Store(external_caught, rax);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate);
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ Store(pending_exception, rax);
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(rax);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(rax);
}
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
- // Setup frame.
+ // Set up frame.
__ push(rbp);
__ movq(rbp, rsp);
@@ -3605,20 +4071,23 @@
__ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
__ Store(pending_exception, rax);
__ movq(rax, Failure::Exception(), RelocInfo::NONE);
__ jmp(&exit);
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
// Clear any pending exceptions.
__ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
@@ -3627,11 +4096,11 @@
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return. We load the address from an
+ // external reference instead of inlining the call target address directly
+ // in the code, because the builtin stubs may not have been generated yet
+ // at the time this code is generated.
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate);
@@ -3740,7 +4209,7 @@
__ bind(&miss);
}
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(rbx, &slow);
@@ -3757,14 +4226,17 @@
__ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
+ // Get return address and delta to inlined map check.
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
__ Assert(equal, "InstanceofStub unexpected call site cache (check).");
}
+ __ movq(kScratchRegister,
+ Operand(kScratchRegister, kOffsetToMapCheckValue));
+ __ movq(Operand(kScratchRegister, 0), rax);
}
__ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
@@ -3791,9 +4263,11 @@
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Store offset of true in the root array at the inline check site.
- ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
- == 0xB0 - 0x100);
- __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize.
+ int true_offset = 0x100 +
+ (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+ // Assert it is a 1-byte signed value.
+ ASSERT(true_offset >= 0 && true_offset < 0x100);
+ __ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -3812,9 +4286,11 @@
__ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Store offset of false in the root array at the inline check site.
- ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
- == 0xB8 - 0x100);
- __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize.
+ int false_offset = 0x100 +
+ (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+ // Assert it is a 1-byte signed value.
+ ASSERT(false_offset >= 0 && false_offset < 0x100);
+ __ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -3904,85 +4380,25 @@
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ movq(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
- __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
__ j(above_equal, index_out_of_range_);
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
+ __ SmiToInteger32(index_, index_);
- // Handle non-flat strings.
- __ and_(result_, Immediate(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmpb(result_, Immediate(kExternalStringTag));
- __ j(greater, &sliced_string);
- __ j(equal, &call_runtime_);
+ StringCharLoadGenerator::Generate(
+ masm, object_, index_, result_, &call_runtime_);
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string, Label::kNear);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
- __ bind(&assure_seq_string);
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
- __ jmp(&flat_string);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxwl(result_, FieldOperand(object_,
- scratch_, times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxbl(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
__ Integer32ToSmi(result_, result_);
__ bind(&exit_);
}
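// The rewritten fast path above reduces to: take a smi index, bounds-check it
// against the string length, then let StringCharLoadGenerator fetch the
// character. A minimal standalone C++ sketch (not V8 code) of that shape,
// with the result codes standing in for the stub's slow-case exits:
#include <cassert>
#include <string>

namespace sketch {

enum CharCodeAtResult { OK, INDEX_NOT_SMI, INDEX_OUT_OF_RANGE };

CharCodeAtResult CharCodeAtFast(const std::string& subject,
                                double index,
                                int* result) {
  int smi_index = static_cast<int>(index);
  if (static_cast<double>(smi_index) != index) {
    return INDEX_NOT_SMI;  // slow case converts the index to an integer first
  }
  if (smi_index < 0 || static_cast<size_t>(smi_index) >= subject.size()) {
    return INDEX_OUT_OF_RANGE;
  }
  *result = static_cast<unsigned char>(subject[smi_index]);
  return OK;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  int code = 0;
  assert(CharCodeAtFast("abc", 1, &code) == OK && code == 'b');
  assert(CharCodeAtFast("abc", 7, &code) == INDEX_OUT_OF_RANGE);
  assert(CharCodeAtFast("abc", 1.5, &code) == INDEX_NOT_SMI);
  return 0;
}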
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
Factory* factory = masm->isolate()->factory();
@@ -3995,7 +4411,6 @@
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
- __ push(index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4004,19 +4419,18 @@
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(rax)) {
+ if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(scratch_, rax);
+ __ movq(index_, rax);
}
- __ pop(index_);
__ pop(object_);
// Reload the instance type.
__ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4026,6 +4440,7 @@
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
__ push(object_);
+ __ Integer32ToSmi(index_, index_);
__ push(index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
if (!result_.is(rax)) {
@@ -4058,7 +4473,8 @@
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -4085,14 +4501,15 @@
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
+ Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
@@ -4101,14 +4518,14 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(rax, &string_add_runtime);
+ __ JumpIfSmi(rax, &call_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
// First argument is a a string, test second.
- __ JumpIfSmi(rdx, &string_add_runtime);
+ __ JumpIfSmi(rdx, &call_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -4174,9 +4591,9 @@
__ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
- // Check that both strings are non-external ascii strings.
+ // Check that both strings are non-external ASCII strings.
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
+ &call_runtime);
// Get the two characters forming the sub string.
__ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -4191,20 +4608,30 @@
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
- __ Set(rbx, 2);
- __ jmp(&make_flat_ascii_string);
+ __ Set(rdi, 2);
+ __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
+ // rbx - first byte: first character
+ // rbx - second byte: *maybe* second character
+ // Make sure that the second byte of rbx contains the second character.
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ shll(rcx, Immediate(kBitsPerByte));
+ __ orl(rbx, rcx);
+ // Write both characters to the new string.
+ __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ IncrementCounter(counters->string_add_native(), 1);
+ __ ret(2 * kPointerSize);
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
+ __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
__ j(below, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
__ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &string_add_runtime);
+ __ j(above, &call_runtime);
// If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ascii the result is an ascii cons string.
+ // both strings are ASCII the result is an ASCII cons string.
// rax: first string
// rbx: length of resulting flat string
// rdx: second string
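// The two length checks above pick one of three outcomes from the combined
// length alone. A minimal standalone C++ sketch (not V8 code) of that
// decision; the threshold constants are assumptions standing in for
// ConsString::kMinLength and String::kMaxLength.
#include <cassert>
#include <string>

namespace sketch {

const size_t kConsMinLength = 13;    // assumed ConsString::kMinLength
const size_t kMaxLength = 1u << 28;  // assumed String::kMaxLength

enum AddStrategy { FLAT_COPY, CONS_STRING, RUNTIME_CALL };

AddStrategy ChooseStringAddStrategy(const std::string& a,
                                    const std::string& b) {
  size_t length = a.size() + b.size();
  if (length < kConsMinLength) return FLAT_COPY;  // short: copying is cheap
  if (length > kMaxLength) return RUNTIME_CALL;   // let the runtime throw
  return CONS_STRING;                             // O(1): reference both parts
}

}  // namespace sketch

int main() {
  using namespace sketch;
  assert(ChooseStringAddStrategy("foo", "bar") == FLAT_COPY);
  assert(ChooseStringAddStrategy("the quick brown", " fox") == CONS_STRING);
  return 0;
}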
@@ -4218,8 +4645,8 @@
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
__ bind(&ascii_data);
- // Allocate an acsii cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+ // Allocate an ASCII cons string.
+ __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
@@ -4232,7 +4659,7 @@
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
+ // to contain only ASCII characters.
// rcx: first instance type AND second instance type.
// r8: first instance type.
// r9: second instance type.
@@ -4244,111 +4671,103 @@
__ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
__ jmp(&allocated);
- // Handle creating a flat result. First check that both strings are not
- // external strings.
+ // We cannot encounter sliced strings or cons strings here since:
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+ // Handle creating a flat result from either external or sequential strings.
+ // Locate the first characters' locations.
// rax: first string
// rbx: length of resulting flat string as smi
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
+ Label first_prepared, second_prepared;
+ Label first_is_sequential, second_is_sequential;
__ bind(&string_add_flat_result);
- __ SmiToInteger32(rbx, rbx);
- __ movl(rcx, r8);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- __ movl(rcx, r9);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- // We cannot encounter sliced strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
- // Now check if both strings are ascii strings.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
+
+ __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
+ // r14: length of first string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(r8, Immediate(kStringRepresentationMask));
+ __ j(zero, &first_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ testb(r8, Immediate(kShortExternalStringMask));
+ __ j(not_zero, &call_runtime);
+ __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
+ __ jmp(&first_prepared, Label::kNear);
+ __ bind(&first_is_sequential);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ bind(&first_prepared);
+
+ // Check whether both strings have same encoding.
+ __ xorl(r8, r9);
+ __ testb(r8, Immediate(kStringEncodingMask));
+ __ j(not_zero, &call_runtime);
+
+ __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
+ // r15: length of second string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(r9, Immediate(kStringRepresentationMask));
+ __ j(zero, &second_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ testb(r9, Immediate(kShortExternalStringMask));
+ __ j(not_zero, &call_runtime);
+ __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
+ __ jmp(&second_prepared, Label::kNear);
+ __ bind(&second_is_sequential);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ bind(&second_prepared);
+
Label non_ascii_string_add_flat_result;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(r8, Immediate(kStringEncodingMask));
+ // r9: instance type of second string
+ // First string and second string have the same encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ SmiToInteger32(rbx, rbx);
+ __ testb(r9, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii_string_add_flat_result);
- __ testl(r9, Immediate(kStringEncodingMask));
- __ j(zero, &string_add_runtime);
__ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
+ // Both strings are ASCII strings. As they are short they are both flat.
+ __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
+ // rax: result string
// Locate first character of result.
- __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second string
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
- __ movq(rax, rbx);
+ __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // rcx: first char of first string
+ // rbx: first character of result
+ // r14: length of first string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
+ // rbx: next character of result
+ // rdx: first char of second string
+ // r15: length of second string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
- // Handle creating a flat two byte result.
- // rax: first string - known to be two byte
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of first string
__ bind(&non_ascii_string_add_flat_result);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ and_(r9, Immediate(kStringEncodingMask));
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
+ // Both strings are two byte strings. As they are short they are both flat.
+ __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
+ // rax: result string
// Locate first character of result.
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second argument
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
- __ movq(rax, rbx);
+ __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // rcx: first char of first string
+ // rbx: first character of result
+ // r14: length of first string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
+ // rbx: next character of result
+ // rdx: first char of second string
+ // r15: length of second string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
+ __ bind(&call_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -4566,7 +4985,12 @@
__ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
__ j(equal, not_found);
- // Must be null (deleted entry).
+ // Must be the hole (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ cmpq(kScratchRegister, candidate);
+ __ Assert(equal, "oddball in symbol table is not undefined or the hole");
+ }
__ jmp(&next_probe[i]);
__ bind(&is_string);
@@ -4580,7 +5004,7 @@
// JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
Register temp = kScratchRegister;
- // Check that the candidate is a non-external ascii string.
+ // Check that the candidate is a non-external ASCII string.
__ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(
temp, temp, &next_probe[i]);
@@ -4695,8 +5119,12 @@
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
__ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
- Label return_rax;
- __ j(equal, &return_rax);
+ Label not_original_string;
+ __ j(not_equal, ¬_original_string, Label::kNear);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(kArgumentsSize);
+ __ bind(¬_original_string);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
@@ -4715,71 +5143,77 @@
// Get the two characters forming the sub string.
__ SmiToInteger32(rdx, rdx); // From index is no longer smi.
__ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx,
+ __ movzxbq(rdi,
FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
// Try to lookup two character string in symbol table.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
+ masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ movq(rax, Operand(rsp, kStringOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // Set up registers for allocating the two character string.
+ __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+ __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
+ __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&result_longer_than_two);
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length
+ // rdx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into rdi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ testb(rbx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ __ testb(rbx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &runtime);
+ __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ Set(rcx, 2);
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the correct register.
+ __ movq(rdi, rax);
+
+ __ bind(&underlying_unpacked);
if (FLAG_string_slices) {
Label copy_routine;
+ // rdi: underlying subject string
+ // rbx: instance type of underlying subject string
+ // rdx: adjusted start index (smi)
+ // rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyways.
- STATIC_ASSERT(2 < SlicedString::kMinLength);
- __ jmp(©_routine);
- __ bind(&result_longer_than_two);
-
- // rax: string
- // rbx: instance type
- // rcx: sub string length
- // rdx: from index (smi)
- Label allocate_slice, sliced_string, seq_string;
__ cmpq(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, ©_routine);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(rbx, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ j(zero, &runtime);
-
- __ testb(rbx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
- __ movq(rdi, rax);
-
- __ bind(&allocate_slice);
- // edi: underlying subject string
- // ebx: instance type of original subject string
- // edx: offset
- // ecx: length
// Allocate new sliced string. At this point we do not reload the instance
// type including the string encoding because we simply rely on the info
// provided by the original string. It does not matter if the original
@@ -4790,93 +5224,96 @@
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(rax, rbx, no_reg, &runtime);
+ __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
__ jmp(&set_slice_header, Label::kNear);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(rax, rbx, no_reg, &runtime);
+ __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ Integer32ToSmi(rcx, rcx);
__ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
__ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ jmp(&return_rax);
+ __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+ __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(kArgumentsSize);
__ bind(©_routine);
- } else {
- __ bind(&result_longer_than_two);
}
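// With FLAG_string_slices the code above prefers a (parent, offset, length)
// view over copying once the result is long enough. A minimal standalone C++
// sketch (not V8 code) of that choice; kSliceMinLength is an assumption
// standing in for SlicedString::kMinLength.
#include <cassert>
#include <cstddef>
#include <string>

namespace sketch {

const size_t kSliceMinLength = 13;  // assumed SlicedString::kMinLength

struct SubStringResult {
  bool is_slice;
  const std::string* parent;  // set when is_slice
  size_t offset, length;      // set when is_slice
  std::string copy;           // set when !is_slice
};

SubStringResult SubString(const std::string& parent, size_t from, size_t to) {
  SubStringResult result;
  result.length = to - from;
  if (result.length >= kSliceMinLength) {
    result.is_slice = true;   // O(1): just record parent and offset
    result.parent = &parent;
    result.offset = from;
  } else {
    result.is_slice = false;  // short: copying is cheaper than a slice object
    result.parent = NULL;
    result.offset = 0;
    result.copy = parent.substr(from, result.length);
  }
  return result;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  std::string s("the quick brown fox jumps over the lazy dog");
  assert(SubString(s, 4, 9).is_slice == false);  // "quick": copied
  assert(SubString(s, 4, 25).is_slice == true);  // long enough: sliced
  return 0;
}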
- // rax: string
- // rbx: instance type
- // rcx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
+ // rdi: underlying subject string
+ // rbx: instance type of underlying subject string
+ // rdx: adjusted start index (smi)
+ // rcx: length
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label two_byte_sequential, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(rbx, Immediate(kExternalStringTag));
+ __ j(zero, &sequential_string);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ testb(rbx, Immediate(kShortExternalStringMask));
+ __ j(not_zero, &runtime);
+ __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ testb(rbx, Immediate(kStringEncodingMask));
+ __ j(zero, &two_byte_sequential);
// Allocate the result.
- __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+ __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
// rax: result string
// rcx: result string length
- __ movq(rdx, rsi); // esi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ __ movq(r14, rsi); // rsi used by following code.
+ { // Locate character of sub string start.
+ SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
+ __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqAsciiString::kHeaderSize - kHeapObjectTag));
}
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
// rax: result string
// rcx: result length
- // rdx: original value of rsi
// rdi: first character of result
// rsi: character of sub string start
+ // r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, rdx); // Restore rsi.
- Counters* counters = masm->isolate()->counters();
+ __ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
- __ bind(&non_ascii_flat);
- // rax: string
- // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
- // rcx: result string length
- // Check for sequential two byte string
- __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
- __ j(not_equal, &runtime);
-
+ __ bind(&two_byte_sequential);
// Allocate the result.
- __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+ __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
// rax: result string
// rcx: result string length
- __ movq(rdx, rsi); // esi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ __ movq(r14, rsi); // rsi used by following code.
+ { // Locate character of sub string start.
+ SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
+ __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqAsciiString::kHeaderSize - kHeapObjectTag));
}
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
- // rdx: original value of rsi
// rdi: first character of result
// rsi: character of sub string start
+ // r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, rdx); // Restore esi.
-
- __ bind(&return_rax);
+ __ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
@@ -5017,7 +5454,7 @@
__ movb(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, near_jump);
- __ addq(index, Immediate(1));
+ __ incq(index);
__ j(not_zero, &loop);
}
@@ -5047,7 +5484,7 @@
// Check that both are sequential ASCII strings.
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
- // Inline comparison of ascii strings.
+ // Inline comparison of ASCII strings.
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ pop(rcx);
@@ -5090,15 +5527,15 @@
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
- Label unordered;
+ Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
Condition either_smi = masm->CheckEitherSmi(rax, rdx);
__ j(either_smi, &generic_stub, Label::kNear);
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
// Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -5119,11 +5556,25 @@
__ ret(0);
__ bind(&unordered);
-
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ Cmp(rax, masm->isolate()->factory()->undefined_value());
+ __ j(not_equal, &miss);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ jmp(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &unordered);
+ }
+
__ bind(&miss);
GenerateMiss(masm);
}
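// The new maybe_undefined exits above work because undefined behaves like NaN
// under the ordered operators, and the 'unordered' path already returns the
// "false" answer. A minimal standalone C++ check (not V8 code) of that
// arithmetic fact:
#include <cassert>
#include <limits>

int main() {
  double undefined_as_number = std::numeric_limits<double>::quiet_NaN();
  double one = 1.0;
  // All ordered comparisons involving NaN evaluate to false.
  assert(!(undefined_as_number < one));
  assert(!(undefined_as_number > one));
  assert(!(undefined_as_number <= one));
  assert(!(one < undefined_as_number));
  // Equality against NaN is also false, but the stub only reuses this path
  // for the ordered operators; == and != keep their own handling.
  assert(!(undefined_as_number == one));
  return 0;
}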
@@ -5174,9 +5625,10 @@
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
- ASSERT(GetCondition() == equal);
Label miss;
+ bool equality = Token::IsEqualityOp(op_);
+
// Registers containing left and right operands respectively.
Register left = rdx;
Register right = rax;
@@ -5214,24 +5666,31 @@
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
- Label do_compare;
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsSymbolMask));
- __ j(zero, &do_compare, Label::kNear);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ ret(0);
+ if (equality) {
+ Label do_compare;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &do_compare, Label::kNear);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ ret(0);
+ __ bind(&do_compare);
+ }
// Check that both strings are sequential ASCII.
Label runtime;
- __ bind(&do_compare);
__ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
// Compare flat ASCII strings. Returns when done.
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
+ }
// Handle more complex cases in runtime.
__ bind(&runtime);
@@ -5239,7 +5698,11 @@
__ push(left);
__ push(right);
__ push(tmp1);
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
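// The symbol fast path above exploits interning: two symbols with equal
// contents are the same object, so once pointer identity has failed they must
// be unequal and no character comparison is needed. A minimal standalone C++
// sketch (not V8 code) of that property, with std::set as a stand-in
// interning table:
#include <cassert>
#include <set>
#include <string>

namespace sketch {

// Interns strings so equal contents share one canonical object.
const std::string* Intern(std::set<std::string>* table, const std::string& s) {
  return &*table->insert(s).first;
}

// Equality for two interned strings reduces to pointer identity.
bool SymbolsEqual(const std::string* a, const std::string* b) {
  return a == b;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  std::set<std::string> table;
  const std::string* a = Intern(&table, "monomorphic");
  const std::string* b = Intern(&table, "monomorphic");
  const std::string* c = Intern(&table, "megamorphic");
  assert(SymbolsEqual(a, b));   // same contents intern to the same object
  assert(!SymbolsEqual(a, c));  // different pointers => different contents
  return 0;
}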
@@ -5266,49 +5729,62 @@
}
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+ __ j(either_smi, &miss, Label::kNear);
+
+ __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ Cmp(rcx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ Cmp(rbx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ subq(rax, rdx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
+ {
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ push(rdx);
- __ push(rax);
- __ Push(Smi::FromInt(op_));
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rdx);
+ __ push(rax);
+ __ push(rdx);
+ __ push(rax);
+ __ Push(Smi::FromInt(op_));
+ __ CallExternalReference(miss, 3);
- // Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(rcx);
- __ pop(rax);
- __ pop(rdx);
- __ push(rcx);
+ // Compute the entry point of the rewritten stub.
+ __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ pop(rax);
+ __ pop(rdx);
+ }
// Do a tail call to the rewritten stub.
__ jmp(rdi);
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
- // (their names are the null value).
+ // (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// r0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
@@ -5337,11 +5813,18 @@
__ Cmp(entity_name, Handle<String>(name));
__ j(equal, miss);
+ Label the_hole;
+ // Check for the hole and skip.
+ __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &the_hole, Label::kNear);
+
// Check if the entry name is not a symbol.
__ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, miss);
+
+ __ bind(&the_hole);
}
StringDictionaryLookupStub stub(properties,
@@ -5350,12 +5833,10 @@
StringDictionaryLookupStub::NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
__ push(Immediate(name->Hash()));
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
__ testq(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
- return result;
}
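// The negative lookup above probes slots at (hash + i + i*i) & mask and
// concludes the key is absent as soon as an empty slot is seen, skipping
// deleted (hole) entries. A minimal standalone C++ sketch (not V8 code) of
// that probing scheme; the table layout and sentinels are assumptions, not
// V8's dictionary format.
#include <cassert>
#include <string>
#include <vector>

namespace sketch {

const char* const kEmpty = "";          // stands in for undefined
const char* const kHole = "<deleted>";  // stands in for the hole

// Returns true when 'name' is provably absent from the probed slots.
bool NegativeLookup(const std::vector<std::string>& slots,
                    size_t hash,
                    const std::string& name) {
  size_t mask = slots.size() - 1;  // slots.size() must be a power of two
  for (size_t i = 0; i < slots.size(); i++) {
    const std::string& entry = slots[(hash + i + i * i) & mask];
    if (entry == kEmpty) return true;  // empty slot: name cannot be present
    if (entry == kHole) continue;      // deleted entry: keep probing
    if (entry == name) return false;   // found it: not a negative result
  }
  return false;  // inconclusive: fall back to a full lookup
}

}  // namespace sketch

int main() {
  using namespace sketch;
  std::vector<std::string> slots(8, kEmpty);
  slots[3] = "x";
  slots[4] = kHole;
  assert(!NegativeLookup(slots, 3, "x"));  // present at its probe slot
  assert(NegativeLookup(slots, 4, "y"));   // hole skipped, then empty slot
  return 0;
}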
@@ -5370,6 +5851,11 @@
Register name,
Register r0,
Register r1) {
+ ASSERT(!elements.is(r0));
+ ASSERT(!elements.is(r1));
+ ASSERT(!name.is(r0));
+ ASSERT(!name.is(r1));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -5412,6 +5898,8 @@
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
// esp[0 * kPointerSize]: return address.
// esp[1 * kPointerSize]: key's hash.
@@ -5497,6 +5985,369 @@
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField and
+ // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
+ // GenerateStoreField calls the stub with two different permutations of
+ // registers. This is the second.
+ { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
+ // StoreIC::GenerateNormal via GenerateDictionaryStore.
+ { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
+ // KeyedStoreIC::GenerateGeneric.
+ { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
+ { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
+ { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
+ // StoreArrayLiteralElementStub::Generate
+ { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
+ // Null termination.
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
+};
+
+#undef REG
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
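// IsPregenerated above scans the no_reg-terminated kAheadOfTime table. A
// minimal standalone C++ sketch (not V8 code) of that scan, with plain
// integers standing in for register codes and -1 playing the role of no_reg:
#include <cassert>

namespace sketch {

const int kNoReg = -1;

struct StubEntry {
  int object, value, address;
};

// The table is terminated by an entry whose object field is kNoReg, in the
// same spirit as the null-terminated list above.
bool IsPregenerated(const StubEntry* table, int object, int value,
                    int address) {
  for (const StubEntry* entry = table; entry->object != kNoReg; entry++) {
    if (entry->object == object &&
        entry->value == value &&
        entry->address == address) {
      return true;
    }
  }
  return false;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  const StubEntry table[] = {
    { 3, 0, 7 },                // e.g. a {rbx, rax, rdi} combination
    { 3, 1, 2 },                // e.g. a {rbx, rcx, rdx} combination
    { kNoReg, kNoReg, kNoReg }  // terminator
  };
  assert(IsPregenerated(table, 3, 0, 7));
  assert(!IsPregenerated(table, 0, 1, 2));
  return 0;
}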
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a compare instruction (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+ __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ masm->set_byte_at(0, kTwoByteNopInstruction);
+ masm->set_byte_at(2, kFiveByteNopInstruction);
+}
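// The patching scheme described above keeps the first two instructions of the
// stub as either harmless compares ("nops" whose immediates swallow the jump
// offsets) or real jumps into the incremental paths; activating or
// deactivating incremental marking flips only the opcode bytes. A minimal
// standalone C++ sketch (not V8 code) of that patching; the opcode values are
// the usual x86 encodings (cmp al/eax = 0x3c/0x3d, jmp rel8/rel32 =
// 0xeb/0xe9), stated here as assumptions rather than taken from the header.
#include <cassert>
#include <stdint.h>

namespace sketch {

const uint8_t kTwoByteNop = 0x3c;    // cmp al, imm8 - consumes the rel8 byte
const uint8_t kTwoByteJump = 0xeb;   // jmp rel8
const uint8_t kFiveByteNop = 0x3d;   // cmp eax, imm32 - consumes the rel32
const uint8_t kFiveByteJump = 0xe9;  // jmp rel32

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

// code[0..1] is the two-byte slot, code[2..6] the five-byte slot.
void Patch(uint8_t* code, Mode mode) {
  switch (mode) {
    case STORE_BUFFER_ONLY:        // both incremental branches disabled
      code[0] = kTwoByteNop;
      code[2] = kFiveByteNop;
      break;
    case INCREMENTAL:              // enable the non-compacting branch
      code[0] = kTwoByteJump;
      code[2] = kFiveByteNop;
      break;
    case INCREMENTAL_COMPACTION:   // enable the compacting branch
      code[0] = kTwoByteNop;
      code[2] = kFiveByteJump;
      break;
  }
}

}  // namespace sketch

int main() {
  using namespace sketch;
  uint8_t code[7] = { 0 };
  Patch(code, STORE_BUFFER_ONLY);
  assert(code[0] == kTwoByteNop && code[2] == kFiveByteNop);
  Patch(code, INCREMENTAL_COMPACTION);
  assert(code[0] == kTwoByteNop && code[2] == kFiveByteJump);
  return 0;
}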
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(),
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+#ifdef _WIN64
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+ Register address =
+ arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(arg1));
+ __ Move(address, regs_.address());
+ __ Move(arg1, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ // TODO(gc) Can we just set address arg2 in the beginning?
+ __ Move(arg2, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ movq(arg2, Operand(address, 0));
+ }
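+  // At this point arg1 holds the object and arg2 holds either the slot
+  // address (INCREMENTAL_COMPACTION) or the value read from the slot
+  // (INCREMENTAL); arg3 receives the isolate below.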
+ __ LoadAddress(arg3, ExternalReference::isolate_address());
+ int argument_count = 3;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_object;
+
+  // Let's look at the color of the object: if it is not black, we don't have
+  // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &on_black,
+ Label::kNear);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ zero,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need an extra register for this, so we push the object register
+ // temporarily.
+ __ push(regs_.object());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object,
+ Label::kNear);
+ __ pop(regs_.object());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&need_incremental_pop_object);
+ __ pop(regs_.object());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : element value to store
+ // -- rbx : array literal
+ // -- rdi : map of array literal
+ // -- rcx : element index as smi
+ // -- rdx : array literal index in function
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ __ CheckFastElements(rdi, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(rax, &smi_element);
+ __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+
+ __ bind(&slow_elements);
+  __ pop(rdi); // Pop the return address and remember to put it back later for
+               // the tail call.
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rax);
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ push(rdx);
+  __ push(rdi); // Push the return address back so that the tail call returns
+                // to the right place.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ SmiToInteger32(kScratchRegister, rcx);
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ movq(Operand(rcx, 0), rax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(rbx, rcx, rax,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ SmiToInteger32(kScratchRegister, rcx);
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ FixedArrayBase::kHeaderSize), rax);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r11, rcx);
+ __ StoreNumberToDoubleElements(rax,
+ r9,
+ r11,
+ xmm0,
+ &slow_elements);
+ __ ret(0);
+}
+
#undef __
} } // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 4058118..6a1a18f 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -48,6 +48,8 @@
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
+ static void GenerateOperation(MacroAssembler* masm,
+ TranscendentalCache::Type type);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
@@ -55,7 +57,32 @@
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
+};
+
+
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated() { return true; }
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
@@ -124,7 +151,7 @@
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -210,7 +237,7 @@
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
@@ -397,13 +424,12 @@
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0);
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
@@ -413,6 +439,8 @@
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -425,7 +453,7 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -446,6 +474,246 @@
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
+ static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
+
+ static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
+ static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+
+ static Mode GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 7);
+ }
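+
+  // Taken together, GetMode and Patch treat the stub's first seven bytes as a
+  // small mode header (sketch):
+  //   STORE_BUFFER_ONLY:      3c .. 3d .. .. .. ..  (both jumps disabled)
+  //   INCREMENTAL:            eb .. 3d .. .. .. ..  (short jump enabled)
+  //   INCREMENTAL_COMPACTION: 3c .. e9 .. .. .. ..  (near jump enabled)
+  // which is why exactly seven bytes are flushed from the instruction cache.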
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers, where the third
+ // is always rcx (needed for shift operations). The input is two registers
+ // that must be preserved and one scratch register provided by the caller.
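+  //
+  // For example (illustrative): if the object register passed in is rcx, the
+  // constructor below remaps object_ to another allocatable register, and
+  // Save()/Restore() move the value between rcx and that register around the
+  // body of the stub.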
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_orig_(object),
+ address_orig_(address),
+ scratch0_orig_(scratch0),
+ object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
+ if (scratch0.is(rcx)) {
+ scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
+ }
+ if (object.is(rcx)) {
+ object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
+ }
+ if (address.is(rcx)) {
+ address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
+ }
+ ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!address_orig_.is(object_));
+ ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+ // We don't have to save scratch0_orig_ because it was given to us as
+ // a scratch register. But if we had to switch to a different reg then
+ // we should save the new scratch0_.
+ if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!rcx.is(scratch0_orig_) &&
+ !rcx.is(object_orig_) &&
+ !rcx.is(address_orig_)) {
+ masm->push(rcx);
+ }
+ masm->push(scratch1_);
+ if (!address_.is(address_orig_)) {
+ masm->push(address_);
+ masm->movq(address_, address_orig_);
+ }
+ if (!object_.is(object_orig_)) {
+ masm->push(object_);
+ masm->movq(object_, object_orig_);
+ }
+ }
+
+ void Restore(MacroAssembler* masm) {
+ // These will have been preserved the entire time, so we just need to move
+ // them back. Only in one case is the orig_ reg different from the plain
+ // one, since only one of them can alias with rcx.
+ if (!object_.is(object_orig_)) {
+ masm->movq(object_orig_, object_);
+ masm->pop(object_);
+ }
+ if (!address_.is(address_orig_)) {
+ masm->movq(address_orig_, address_);
+ masm->pop(address_);
+ }
+ masm->pop(scratch1_);
+ if (!rcx.is(scratch0_orig_) &&
+ !rcx.is(object_orig_) &&
+ !rcx.is(address_orig_)) {
+ masm->pop(rcx);
+ }
+ if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+
+ // The three scratch registers (incl. rcx) will be restored by other means
+  // so we don't bother pushing them here.  Rbx, rbp and r12-r15 are
+  // callee-saved and don't need to be preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
+ SaveFPRegsMode mode) {
+ masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_orig_;
+ Register address_orig_;
+ Register scratch0_orig_;
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ // Third scratch register is always rcx.
+
+ Register GetRegThatIsNotRcxOr(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(rcx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 4> {};
+ class ValueBits: public BitField<int, 4, 4> {};
+ class AddressBits: public BitField<int, 8, 4> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
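+  // Together these encode a 14-bit minor key: three 4-bit register codes plus
+  // one bit each for the remembered-set action and the FP-save mode.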
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 507bbd4..a8d39b2 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -38,17 +39,90 @@
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
}
#define __ masm.
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) {
+ // Fallback to library function if function cannot be created.
+ switch (type) {
+ case TranscendentalCache::SIN: return &sin;
+ case TranscendentalCache::COS: return &cos;
+ case TranscendentalCache::TAN: return &tan;
+ case TranscendentalCache::LOG: return &log;
+ default: UNIMPLEMENTED();
+ }
+ }
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // xmm0: raw double input.
+ // Move double input into registers.
+ __ push(rbx);
+ __ push(rdi);
+ __ movq(rbx, xmm0);
+ __ push(rbx);
+ __ fld_d(Operand(rsp, 0));
+ TranscendentalCacheStub::GenerateOperation(&masm, type);
+ // The return value is expected to be in xmm0.
+ __ fstp_d(Operand(rsp, 0));
+ __ pop(rbx);
+ __ movq(xmm0, rbx);
+ __ pop(rdi);
+ __ pop(rbx);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(desc.reloc_size == 0);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
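+
+// Usage sketch (illustrative): the returned pointer is called like the libm
+// routine it replaces, e.g.
+//   UnaryMathFunction fast_sin =
+//       CreateTranscendentalFunction(TranscendentalCache::SIN);
+//   double result = fast_sin(0.5);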
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) return &sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // xmm0: raw double input.
+ // Move double input into registers.
+ __ sqrtsd(xmm0, xmm0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(desc.reloc_size == 0);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
@@ -139,6 +213,367 @@
#endif
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+  // The fail label is reached if allocating a new backing store fails.
+ Label allocated, new_backing_store, only_change_map, done;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+ __ j(equal, &only_change_map);
+
+ // Check backing store for COW-ness. For COW arrays we have to
+ // allocate a new backing store.
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+ __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &new_backing_store);
+ // Check if the backing store is in new-space. If not, we need to allocate
+ // a new one since the old one is in pointer-space.
+ // If in new space, we can reuse the old backing store because it is
+ // the same size.
+ __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
+
+ __ movq(r14, r8); // Destination array equals source array.
+
+ // r8 : source FixedArray
+ // r9 : elements array length
+ // r14: destination FixedDoubleArray
+ // Set backing store's map
+ __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+
+ __ bind(&allocated);
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Convert smis to doubles and holes to hole NaNs. The Array's length
+ // remains unchanged.
+ STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
+ STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
+ Label loop, entry, convert_hole;
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ // r15: the-hole NaN
+ __ jmp(&entry);
+
+ // Allocate new backing store.
+ __ bind(&new_backing_store);
+ __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
+ // Set backing store's map
+ __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ // Set receiver's backing store.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movq(r11, r14);
+ __ RecordWriteField(rdx,
+ JSObject::kElementsOffset,
+ r11,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Set backing store's length.
+ __ Integer32ToSmi(r11, r9);
+ __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ jmp(&allocated);
+
+ __ bind(&only_change_map);
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&done);
+
+ // Conversion loop.
+ __ bind(&loop);
+ __ movq(rbx,
+ FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+ // r9 : current element's index
+ // rbx: current element (smi-tagged)
+ __ JumpIfNotSmi(rbx, &convert_hole);
+ __ SmiToInteger32(rbx, rbx);
+ __ cvtlsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ jmp(&entry);
+ __ bind(&convert_hole);
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+ __ Assert(equal, "object found in smi-only array");
+ }
+
+ __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
+ __ bind(&entry);
+ __ decq(r9);
+ __ j(not_sign, &loop);
+
+ __ bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required, only_change_map;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+ __ j(equal, &only_change_map);
+
+ __ push(rax);
+
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+ // r8 : source FixedDoubleArray
+ // r9 : number of elements
+ __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+ // r11: destination FixedArray
+ __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ Integer32ToSmi(r14, r9);
+ __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+
+ // Prepare for conversion loop.
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
+ // rsi: the-hole NaN
+ // rdi: pointer to the-hole
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(fail);
+
+ // Box doubles into heap numbers.
+ __ bind(&loop);
+ __ movq(r14, FieldOperand(r8,
+ r9,
+ times_pointer_size,
+ FixedDoubleArray::kHeaderSize));
+ // r9 : current element's index
+ // r14: current element
+ __ cmpq(r14, rsi);
+ __ j(equal, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(rax, r15, &gc_required);
+ // rax: new heap number
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ movq(FieldOperand(r11,
+ r9,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ rax);
+ __ movq(r15, r9);
+ __ RecordWriteArray(r11,
+ rax,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&entry, Label::kNear);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ movq(FieldOperand(r11,
+ r9,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ rdi);
+
+ __ bind(&entry);
+ __ decq(r9);
+ __ j(not_sign, &loop);
+
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ RecordWriteField(rdx,
+ JSObject::kElementsOffset,
+ r11,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&only_change_map);
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ testb(result, Immediate(kIsIndirectStringMask));
+ __ j(zero, &check_sequential, Label::kNear);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ testb(result, Immediate(kSlicedNotConsMask));
+ __ j(zero, &cons_string, Label::kNear);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+ __ addq(index, result);
+ __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded, Label::kNear);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, call_runtime);
+ __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label seq_string;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &seq_string, Label::kNear);
+
+ // Handle external strings.
+ Label ascii_external, done;
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ testb(result, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, "external string expected, but not found");
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ testb(result, Immediate(kShortExternalStringTag));
+ __ j(not_zero, call_runtime);
+ // Check encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ j(not_equal, &ascii_external, Label::kNear);
+ // Two-byte string.
+ __ movzxwl(result, Operand(result, index, times_2, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&ascii_external);
+  // ASCII string.
+ __ movzxbl(result, Operand(result, index, times_1, 0));
+ __ jmp(&done, Label::kNear);
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii;
+ __ bind(&seq_string);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii, Label::kNear);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzxwl(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii);
+ __ movzxbl(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&done);
+}
#undef __
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index a0648ce..2e80751 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -69,6 +69,21 @@
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index ae5045f..80e22c6 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,7 +27,7 @@
// CPU specific code for x64 independent of OS goes here.
-#ifdef __GNUC__
+#if defined(__GNUC__) && !defined(__MINGW64__)
#include "third_party/valgrind/valgrind.h"
#endif
@@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
-void CPU::Setup() {
+void CPU::SetUp() {
CpuFeatures::Probe();
}
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 423e6f2..eec83d9 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -100,65 +100,66 @@
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as as two smis causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- ASSERT(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as two smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ ASSERT(!reg.is(kScratchRegister));
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ // Store the 64-bit value as two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ movq(kScratchRegister, reg);
+ __ Integer32ToSmi(reg, reg);
+ __ push(reg);
+ __ sar(kScratchRegister, Immediate(32));
+ __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+ __ push(kScratchRegister);
+ }
}
- // Store the 64-bit value as two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- }
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+ __ Set(rax, 0); // No arguments (argc == 0).
+ __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
- CEntryStub ceb(1);
- __ CallStub(&ceb);
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
+ // Restore the register values from the expression stack.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, kDebugZapValue);
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ // Reconstruct the 64-bit value from two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ shl(kScratchRegister, Immediate(32));
+ __ pop(reg);
+ __ SmiToInteger32(reg, reg);
+ __ or_(reg, kScratchRegister);
+ }
}
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
- }
+
+ // Get rid of the internal frame.
}
- // Get rid of the internal frame.
- __ LeaveInternalFrame();
-
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
@@ -228,8 +229,36 @@
}
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-x64.cc).
+ // ----------- S t a t e -------------
+ // -- rax: return value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-x64.cc).
+ // ----------- S t a t e -------------
+ // -- rdi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-x64.cc).
+ // ----------- S t a t e -------------
+ // -- rdi : function
+ // -- rbx: cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-x64.cc).
// rax is the actual number of arguments not encoded as a smi, see comment
// above IC call.
// ----------- S t a t e -------------
@@ -240,21 +269,16 @@
}
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x64.cc).
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-x64.cc).
+ // rax is the actual number of arguments not encoded as a smi, see comment
+ // above IC call.
// ----------- S t a t e -------------
- // -- rax: return value
+ // -- rax: number of arguments
+ // -- rbx: cache cell for call target
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- // Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
- // ----------- S t a t e -------------
- // No registers used on entry.
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
+ // The number of arguments in rax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
}
@@ -263,9 +287,7 @@
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
+ __ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index f322312..2adf587 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -87,13 +87,19 @@
#endif
}
+ Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -105,7 +111,8 @@
}
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -131,14 +138,18 @@
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
+ *(call_target_address - 3) = 0x66; // 2 byte nop part 1
+ *(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
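+  // In other words (sketch): the site "73 07 e8 <rel32>" (jae +7; call)
+  // becomes "66 90 e8 <rel32>" (2-byte nop; call), and the call now targets
+  // replacement_code->entry(), so it is taken unconditionally.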
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -146,13 +157,16 @@
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
+ ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
+ *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, check_code);
}
@@ -192,12 +206,13 @@
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -233,9 +248,7 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -300,7 +313,7 @@
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
} else {
- // Setup the frame pointer and the context pointer.
+ // Set up the frame pointer and the context pointer.
output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
@@ -324,13 +337,219 @@
}
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+  // Arguments adaptor frames cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ intptr_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc_value);
+}
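+// For reference, the adaptor frame built above is laid out as follows, from
+// higher to lower addresses: the incoming parameters (height slots), the
+// caller's pc, the caller's fp (this frame's fp points at that slot), the
+// ARGUMENTS_ADAPTOR sentinel in the context slot, the function, and the
+// argument count as a smi at the top of the frame.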
+
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating construct stub => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = 6 * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+  // Construct stub frames cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ intptr_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+  // The context is copied from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function (construct sentinel)\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ // The newly allocated object was passed as receiver in the artificial
+ // constructor stub environment created by HEnvironment::CopyForInlining().
+ output_offset -= kPointerSize;
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; allocated receiver\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ construct_stub->instruction_start() +
+ isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
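+// For reference, the fixed part of the construct stub frame built above
+// accounts for the 6 * kPointerSize: caller's pc, caller's fp, context, the
+// CONSTRUCT sentinel in the function slot, the argument count and the
+// allocated receiver.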
+
+
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -350,9 +569,7 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -440,6 +657,7 @@
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(rsi.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -598,7 +816,10 @@
Isolate* isolate = masm()->isolate();
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -644,8 +865,11 @@
__ PrepareCallCFunction(2);
__ movq(arg1, rax);
__ LoadAddress(arg2, ExternalReference::isolate_address());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 2);
+ }
__ pop(rax);
// Replace the current frame with the output frames.
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 1b8871f..adeda0b 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -34,6 +34,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "disasm.h"
+#include "lazy-instance.h"
namespace disasm {
@@ -109,6 +110,7 @@
{ 0xC3, UNSET_OP_ORDER, "ret" },
{ 0xC9, UNSET_OP_ORDER, "leave" },
{ 0xF4, UNSET_OP_ORDER, "hlt" },
+ { 0xFC, UNSET_OP_ORDER, "cld" },
{ 0xCC, UNSET_OP_ORDER, "int3" },
{ 0x60, UNSET_OP_ORDER, "pushad" },
{ 0x61, UNSET_OP_ORDER, "popad" },
@@ -268,7 +270,8 @@
}
-static InstructionTable instruction_table;
+static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
+ LAZY_INSTANCE_INITIALIZER;
static InstructionDesc cmov_instructions[16] = {
@@ -910,15 +913,19 @@
switch (modrm_byte) {
case 0xE0: mnem = "fchs"; break;
case 0xE1: mnem = "fabs"; break;
+ case 0xE3: mnem = "fninit"; break;
case 0xE4: mnem = "ftst"; break;
case 0xE8: mnem = "fld1"; break;
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
+ case 0xF2: mnem = "fptan"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@@ -1034,7 +1041,18 @@
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x28) {
+ if (opcode == 0x1f) {
+ current++;
+ if (rm == 4) { // SIB byte present.
+ current++;
+ }
+ if (mod == 1) { // Byte displacement.
+ current += 1;
+ } else if (mod == 2) { // 32-bit displacement.
+ current += 4;
+ } // else no immediate displacement.
+ AppendToBuffer("nop");
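+      // (For example, this handles the Intel-recommended five-byte nop
+      // "0f 1f 44 00 00": modrm 0x44 has rm == 4 and mod == 1, so one SIB
+      // byte and one displacement byte are skipped.)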
+ } else if (opcode == 0x28) {
AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
@@ -1178,7 +1196,7 @@
int mod, regop, rm;
get_modrm(*current, &mod, ®op, &rm);
current++;
- if (regop == 4) { // SIB byte present.
+ if (rm == 4) { // SIB byte present.
current++;
}
if (mod == 1) { // Byte displacement.
@@ -1322,7 +1340,7 @@
data++;
}
- const InstructionDesc& idesc = instruction_table.Get(current);
+ const InstructionDesc& idesc = instruction_table.Get().Get(current);
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 7012c76..3e3d63d 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,32 +31,32 @@
namespace v8 {
namespace internal {
-static const int kNumRegs = 16;
-static const RegList kJSCallerSaved =
+const int kNumRegs = 16;
+const RegList kJSCallerSaved =
1 << 0 | // rax
1 << 1 | // rcx
1 << 2 | // rdx
1 << 3 | // rbx - used as a caller-saved register in JavaScript code
1 << 7; // rdi - callee function
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kContextOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
- static const int kStateOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = kFPOffset + kPointerSize;
};
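For reference, the reordered handler above has five pointer-sized slots, so on x64 a stack handler occupies 5 * kPointerSize = 40 bytes. A small sketch of the offsets (field meanings inferred from the constant names; illustration only):

#include <cstdio>

int main() {
  const int kPointerSize = 8;  // x64
  const int kNextOffset    = 0 * kPointerSize;  //  0: next handler in the chain
  const int kCodeOffset    = 1 * kPointerSize;  //  8: handler code
  const int kStateOffset   = 2 * kPointerSize;  // 16: handler state
  const int kContextOffset = 3 * kPointerSize;  // 24: saved context
  const int kFPOffset      = 4 * kPointerSize;  // 32: saved frame pointer
  const int kSize = kFPOffset + kPointerSize;   // 40 bytes per handler
  std::printf("%d %d %d %d %d -> size %d\n", kNextOffset, kCodeOffset,
              kStateOffset, kContextOffset, kFPOffset, kSize);
  return 0;
}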
@@ -87,6 +87,9 @@
class StandardFrameConstants : public AllStatic {
public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -112,6 +115,8 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
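Worked out on x64, the new constant above is kFrameSize = StandardFrameConstants::kFixedFrameSize + kPointerSize = 4 * 8 + 8 = 40 bytes: the fixed part (return address, caller fp, context, function) plus one extra slot, presumably the adaptor's argument-count slot given kLengthOffset above.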
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 556523f..85c5e75 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,11 +44,6 @@
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -105,26 +100,54 @@
};
+int FullCodeGenerator::self_optimization_header_size() {
+ return 20;
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
// formal parameter count expected by the function.
//
// The live registers are:
-// o rdi: the JS function object being called (ie, ourselves)
+// o rdi: the JS function object being called (i.e. ourselves)
// o rsi: our context
// o rbp: our caller's frame pointer
// o rsp: stack pointer (pointing to return address)
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ // We can optionally optimize based on counters rather than statistical
+ // sampling.
+ if (info->ShouldSelfOptimize()) {
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[adding self-optimization header to %s]\n",
+ *info->function()->debug_name()->ToCString());
+ }
+ has_self_optimization_header_ = true;
+ MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
+ Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
+ JSGlobalPropertyCell* cell;
+ if (maybe_cell->To(&cell)) {
+ __ movq(rax, Handle<JSGlobalPropertyCell>(cell),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ SmiAddConstant(FieldOperand(rax, JSGlobalPropertyCell::kValueOffset),
+ Smi::FromInt(-1));
+ Handle<Code> compile_stub(
+ isolate()->builtins()->builtin(Builtins::kLazyRecompile));
+ __ j(zero, compile_stub, RelocInfo::CODE_TARGET);
+ ASSERT(masm_->pc_offset() == self_optimization_header_size());
+ }
+ }
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
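The self-optimization header emitted above allocates a property cell holding Compiler::kCallsUntilPrimitiveOpt, decrements it on every entry to the unoptimized code, and jumps to the LazyRecompile builtin when the counter reaches zero; the emitted sequence must be exactly self_optimization_header_size() (20) bytes. A plain C++ sketch of the control flow (hypothetical names and counter value; the real builtin recompiles and then continues in the optimized code):

#include <cstdio>

static const int kCallsUntilOptSketch = 3;       // Placeholder value.
static int counter_cell = kCallsUntilOptSketch;  // Plays the property cell's role.

static void LazyRecompileSketch() { std::printf("trigger optimizing recompile\n"); }

// What the 20-byte header does conceptually on each call.
static void UnoptimizedEntry() {
  if (--counter_cell == 0) {
    // The real header jumps to the LazyRecompile builtin at this point;
    // the sketch just logs it and keeps going.
    LazyRecompileSketch();
  }
  std::printf("run unoptimized body\n");
}

int main() {
  for (int i = 0; i < kCallsUntilOptSketch; ++i) UnoptimizedEntry();
  return 0;
}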
@@ -136,7 +159,7 @@
// with undefined when called as functions (without an explicit
// receiver object). rcx is zero for method calls and non-zero for
// function calls.
- if (info->is_strict_mode() || info->is_native()) {
+ if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
@@ -147,6 +170,11 @@
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -195,11 +223,9 @@
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
+ // Update the write barrier. This clobbers rax and rbx.
+ __ RecordWriteContextSlot(
+ rsi, context_offset, rax, rbx, kDontSaveFPRegs);
}
}
}
@@ -226,9 +252,15 @@
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
+ ArgumentsAccessStub::Type type;
+ if (!is_classic_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
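The stub-type selection above can be restated as a small decision function (a sketch; the enumerator names follow ArgumentsAccessStub in the hunk):

// Which arguments-object stub to use, mirroring the hunk above.
enum ArgumentsStubType { kNewStrict, kNewNonStrictSlow, kNewNonStrictFast };

static ArgumentsStubType ChooseArgumentsStub(bool is_classic_mode,
                                             bool has_duplicate_parameters) {
  if (!is_classic_mode) return kNewStrict;  // Strict or extended mode.
  if (has_duplicate_parameters) return kNewNonStrictSlow;
  return kNewNonStrictFast;
}

int main() {
  // A classic-mode function without duplicate parameters takes the fast path.
  return ChooseArgumentsStub(true, false) == kNewNonStrictFast ? 0 : 1;
}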
@@ -250,8 +282,11 @@
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL);
}
VisitDeclarations(scope()->declarations());
}
@@ -287,7 +322,8 @@
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -377,7 +413,7 @@
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -399,7 +435,7 @@
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -432,7 +468,7 @@
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -491,7 +527,7 @@
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -555,7 +591,7 @@
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -638,15 +674,16 @@
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ movq(location, src);
+
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(scratch0, offset, src, scratch1);
+ __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -657,13 +694,7 @@
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, NULL);
@@ -673,16 +704,17 @@
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
- FunctionLiteral* function,
- int* global_count) {
+ VariableMode mode,
+ FunctionLiteral* function) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
- ++(*global_count);
+ ++global_count_;
break;
case Variable::PARAMETER:
@@ -691,7 +723,7 @@
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ movq(StackOperand(variable), result_register());
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(StackOperand(variable), kScratchRegister);
@@ -715,10 +747,16 @@
VisitForAccumulatorValue(function);
__ movq(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
- __ movq(rbx, rsi);
- __ RecordWrite(rbx, offset, result_register(), rcx);
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(rsi,
+ offset,
+ result_register(),
+ rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -731,11 +769,13 @@
Comment cmnt(masm_, "[ Declaration");
__ push(rsi);
__ Push(variable->name());
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr =
+ (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -743,7 +783,7 @@
// must not destroy the current value.
if (function != NULL) {
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
@@ -755,9 +795,6 @@
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
@@ -871,6 +908,8 @@
__ cmpq(rax, null_value);
__ j(equal, &exit);
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert);
@@ -882,51 +921,17 @@
__ bind(&done_convert);
__ push(rax);
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+ __ j(below_equal, &call_runtime);
+
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
- Register empty_fixed_array_value = r8;
- __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r9;
- __ LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- __ movq(rcx, rax);
- __ bind(&next);
-
- // Check that there are no elements. Register rcx contains the
- // current JS object we've reached through the prototype chain.
- __ cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- __ j(not_equal, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
- __ JumpIfSmi(rdx, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (rdx). This is the case if the next enumeration
- // index field does not contain a smi.
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- __ JumpIfSmi(rdx, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- __ cmpq(rcx, rax);
- __ j(equal, &check_prototype, Label::kNear);
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmpq(rdx, empty_fixed_array_value);
- __ j(not_equal, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ cmpq(rcx, null_value);
- __ j(not_equal, &next);
+ __ CheckEnumCache(null_value, &call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -953,7 +958,7 @@
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- // Setup the four remaining stack slots.
+ // Set up the four remaining stack slots.
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
@@ -962,14 +967,33 @@
__ jmp(&loop);
// We got a fixed array in register rax. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ Push(Smi::FromInt(0)); // Map (0) - force slow check.
- __ push(rax);
+
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Object>(
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ __ LoadHeapObject(rbx, cell);
+ __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
+
+ __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
+ __ j(above, &non_proxy);
+ __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ push(rbx); // Smi
+ __ push(rax); // Array
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
__ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
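The for-in setup above leaves five values on the stack, which is why the cleanup later in this function drops 5 * kPointerSize bytes. The layout, inferred from the rsp offsets used inside the loop (illustration only; slots are pointer-sized, counted from the top of the stack):

// For-in loop stack layout after the setup above (slot 0 is the stack top).
enum ForInSlot {
  kIndexSlot = 0,        // current index (smi)
  kLengthSlot = 1,       // fixed array length (smi)
  kKeyArraySlot = 2,     // fixed array of enumerable keys
  kMapOrMarkerSlot = 3,  // expected map, or Smi(1) slow / Smi(0) proxy marker
  kEnumerableSlot = 4    // the object being iterated over
};

int main() {
  const int kPointerSize = 8;
  const int slots = kEnumerableSlot + 1;
  return slots * kPointerSize == 40 ? 0 : 1;  // Matches the 5-slot cleanup.
}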
@@ -983,17 +1007,22 @@
index.scale,
FixedArray::kHeaderSize));
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
__ movq(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ __ Cmp(rdx, Smi::FromInt(0));
+ __ j(equal, &update_each, Label::kNear);
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@@ -1010,7 +1039,7 @@
__ movq(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
+ EmitAssignment(stmt->each());
}
// Generate code for the body of the loop.
@@ -1021,7 +1050,7 @@
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1029,6 +1058,7 @@
__ addq(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1047,7 +1077,7 @@
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->language_mode());
__ Push(info);
__ CallStub(&stub);
} else {
@@ -1077,7 +1107,7 @@
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1091,7 +1121,7 @@
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1137,7 +1167,7 @@
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1168,16 +1198,23 @@
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ if (local->mode() == CONST) {
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST_HARMONY
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ jmp(done);
}
@@ -1208,23 +1245,63 @@
case Variable::LOCAL:
case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- Label done;
- GetVar(rax, var);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == Variable::LET) {
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else { // Variable::CONST
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
}
- __ bind(&done);
- context()->Plug(rax);
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(rax, var);
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ }
+ __ bind(&done);
+ context()->Plug(rax);
+ break;
+ }
}
+ context()->Plug(var);
break;
}
@@ -1300,12 +1377,22 @@
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ PushRoot(Heap::kNullValueRootIndex);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_properties());
+ __ Push(constant_properties);
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1313,10 +1400,15 @@
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ Push(Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1328,6 +1420,7 @@
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1350,9 +1443,9 @@
VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1372,19 +1465,28 @@
__ Drop(3);
}
break;
- case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0));
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(rsp, 0));
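The AccessorTable change above first groups the getter and setter literals by property key and then emits a single Runtime::kDefineOrRedefineAccessorProperty call per key, instead of one runtime call per accessor. A rough sketch of the grouping idea using a standard map (hypothetical types; the real AccessorTable is a zone-allocated map over literal keys):

#include <cstdio>
#include <map>
#include <string>

// One entry per property key; a missing accessor is passed as null to the
// runtime call, which matches EmitAccessor pushing the null root above.
struct AccessorPairSketch {
  std::string getter;  // Stand-in for the getter FunctionLiteral.
  std::string setter;  // Stand-in for the setter FunctionLiteral.
};

int main() {
  // For a literal like { get x() {}, set x(v) {}, get y() {} }:
  std::map<std::string, AccessorPairSketch> accessor_table;
  accessor_table["x"].getter = "get x";
  accessor_table["x"].setter = "set x";
  accessor_table["y"].getter = "get y";

  // One define call per key, covering both accessors at once.
  for (std::map<std::string, AccessorPairSketch>::const_iterator it =
           accessor_table.begin();
       it != accessor_table.end(); ++it) {
    std::printf("define %s: getter=%s setter=%s\n", it->first.c_str(),
                it->second.getter.empty() ? "null" : it->second.getter.c_str(),
                it->second.setter.empty() ? "null" : it->second.setter.c_str());
  }
  return 0;
}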
@@ -1404,24 +1506,42 @@
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_elements());
- if (expr->constant_elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- __ CallStub(&stub);
+ __ Push(constant_elements);
+ Heap* heap = isolate()->heap();
+ if (has_constant_fast_elements &&
+ constant_elements_values->map() == heap->fixed_cow_array_map()) {
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ length);
+ __ CallStub(&stub);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // change, so it's possible to specialize the stub in advance.
+ FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
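The array-literal path above now chooses between one stub and two runtime calls based on the boilerplate's elements kind, the literal's nesting depth, and its length. Restated as a plain decision function (a sketch; enumerator names follow the hunk, and the depth/length thresholds are taken as parameters rather than repeating the stub's constants):

#include <cstdio>

// Mirrors the decision order in the array-literal hunk above. Sketch only.
enum CloneChoice {
  kCowStub,              // FastCloneShallowArrayStub, COPY_ON_WRITE_ELEMENTS
  kRuntimeDeep,          // Runtime::kCreateArrayLiteral
  kRuntimeShallow,       // Runtime::kCreateArrayLiteralShallow
  kCloneElementsStub,    // FastCloneShallowArrayStub, CLONE_ELEMENTS
  kCloneAnyElementsStub  // FastCloneShallowArrayStub, CLONE_ANY_ELEMENTS
};

static CloneChoice ChooseArrayLiteralClone(bool has_constant_fast_elements,
                                           bool elements_are_cow,
                                           int depth,
                                           int length,
                                           int max_cloned_length) {
  if (has_constant_fast_elements && elements_are_cow) return kCowStub;
  if (depth > 1) return kRuntimeDeep;
  if (length > max_cloned_length) return kRuntimeShallow;
  return has_constant_fast_elements ? kCloneElementsStub : kCloneAnyElementsStub;
}

int main() {
  // A shallow, short, smi-only literal ends up in the CLONE_ANY_ELEMENTS stub.
  std::printf("%d\n", ChooseArrayLiteralClone(false, false, 1, 3, 8));
  return 0;
}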
@@ -1444,14 +1564,28 @@
}
VisitForAccumulatorValue(subexpr);
- // Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(FieldOperand(rbx, offset), result_register());
-
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, offset, result_register(), rcx);
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ // Fast case: array literals with ElementsKind FAST_ELEMENTS cannot
+ // transition, so there is no need to call the runtime stub.
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ // Store the subexpression value in the array's elements.
+ __ movq(FieldOperand(rbx, offset), result_register());
+ // Update the write barrier for the array store.
+ __ RecordWriteField(rbx, offset, result_register(), rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ } else {
+ // Store the subexpression value in the array's elements.
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ Move(rcx, Smi::FromInt(i));
+ __ Move(rdx, Smi::FromInt(expr->literal_index()));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1582,14 +1716,14 @@
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1666,7 +1800,7 @@
}
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1698,9 +1832,9 @@
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic);
break;
}
@@ -1711,14 +1845,13 @@
__ movq(rcx, rax);
__ pop(rdx);
__ pop(rax); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic);
break;
}
}
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(rax);
}
@@ -1729,9 +1862,9 @@
// Global var, const, or let.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -1756,13 +1889,13 @@
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(rax); // Value.
__ push(rsi); // Context.
__ Push(var->name());
- __ Push(Smi::FromInt(strict_mode_flag()));
+ __ Push(Smi::FromInt(language_mode()));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1777,12 +1910,14 @@
__ movq(location, rax);
if (var->IsContextSlot()) {
__ movq(rdx, rax);
- __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1795,14 +1930,15 @@
__ movq(location, rax);
if (var->IsContextSlot()) {
__ movq(rdx, rax);
- __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(rax); // Value.
__ push(rsi); // Context.
__ Push(var->name());
- __ Push(Smi::FromInt(strict_mode_flag()));
+ __ Push(Smi::FromInt(language_mode()));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
@@ -1834,9 +1970,9 @@
} else {
__ pop(rdx);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1874,9 +2010,9 @@
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1981,6 +2117,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -1990,8 +2127,7 @@
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(rsp, arg_count * kPointerSize));
@@ -2002,17 +2138,14 @@
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
- // Push the strict mode flag. In harmony mode every eval call
- // is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
- __ Push(Smi::FromInt(strict_mode));
+ // Push the language mode.
+ __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
+ // Push the start position of the scope the call resides in.
+ __ Push(Smi::FromInt(scope()->start_position()));
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2043,27 +2176,10 @@
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly in
- // generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(rax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and resolve
// eval.
__ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2073,6 +2189,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2175,14 +2292,29 @@
__ Set(rax, arg_count);
__ movq(rdi, Operand(rsp, arg_count * kPointerSize));
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ // Record call targets in unoptimized code, but not in the snapshot.
+ CallFunctionFlags flags;
+ if (!Serializer::enabled()) {
+ flags = RECORD_CALL_TARGET;
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->id(), cell);
+ __ Move(rbx, cell);
+ } else {
+ flags = NO_CALL_FUNCTION_FLAGS;
+ }
+
+ CallConstructStub stub(flags);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2194,7 +2326,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
@@ -2202,7 +2334,8 @@
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2214,7 +2347,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
Split(non_negative_smi, if_true, if_false, fall_through);
@@ -2222,7 +2355,8 @@
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2246,14 +2380,15 @@
__ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
__ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2267,14 +2402,15 @@
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2290,7 +2426,7 @@
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(not_zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2298,7 +2434,8 @@
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2374,12 +2511,13 @@
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2393,14 +2531,15 @@
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2414,14 +2553,15 @@
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2435,7 +2575,7 @@
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2443,8 +2583,8 @@
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2467,14 +2607,15 @@
__ bind(&check_frame_marker);
__ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2490,14 +2631,15 @@
__ pop(rbx);
__ cmpq(rax, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in rdx and the formal
@@ -2511,8 +2653,8 @@
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
@@ -2534,7 +2676,8 @@
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2545,20 +2688,24 @@
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
// Map is now in rax.
__ j(below, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ j(equal, &function);
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- __ j(above_equal, &function);
+ __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ j(equal, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
- // Check if the constructor in the map is a function.
+ // Check if the constructor in the map is a JS function.
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
@@ -2590,7 +2737,7 @@
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2598,6 +2745,7 @@
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2610,8 +2758,8 @@
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2630,9 +2778,12 @@
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
__ PrepareCallCFunction(1);
#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
+ __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+
#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
+ __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
#endif
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -2652,9 +2803,10 @@
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2664,9 +2816,10 @@
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2677,7 +2830,8 @@
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2695,18 +2849,68 @@
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done;
+ Register object = rax;
+ Register result = rax;
+ Register scratch = rcx;
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
+ if (index->value() == 0) {
+ __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ movq(scratch, stamp);
+ __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2);
+#ifdef _WIN64
+ __ movq(rcx, object);
+ __ movq(rdx, index, RelocInfo::NONE);
+#else
+ __ movq(rdi, object);
+ __ movq(rsi, index, RelocInfo::NONE);
+#endif
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&done);
+ }
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
+ MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
@@ -2726,14 +2930,15 @@
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ movq(rdx, rax);
- __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+ __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -2745,7 +2950,8 @@
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2763,7 +2969,8 @@
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2771,7 +2978,6 @@
Register object = rbx;
Register index = rax;
- Register scratch = rcx;
Register result = rdx;
__ pop(object);
@@ -2781,7 +2987,6 @@
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2810,7 +3015,8 @@
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2818,8 +3024,7 @@
Register object = rbx;
Register index = rax;
- Register scratch1 = rcx;
- Register scratch2 = rdx;
+ Register scratch = rdx;
Register result = rax;
__ pop(object);
@@ -2829,8 +3034,7 @@
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -2859,7 +3063,8 @@
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -2871,7 +3076,8 @@
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -2883,10 +3089,11 @@
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2894,10 +3101,11 @@
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2905,10 +3113,23 @@
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallStub(&stub);
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2916,8 +3137,9 @@
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -2925,7 +3147,8 @@
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -2934,18 +3157,31 @@
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
+ __ j(equal, &proxy);
+
// InvokeFunction requires the function in rdi. Move it in there.
__ movq(rdi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(rdi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(rax);
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2955,7 +3191,8 @@
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3010,14 +3247,33 @@
__ movq(Operand(index_2, 0), object);
__ movq(Operand(index_1, 0), temp);
- Label new_space;
- __ InNewSpace(elements, temp, equal, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ temp,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &no_remembered_set,
+ Label::kNear);
+ // Possible optimization: check that both values are Smis
+ // (OR them together and test against the Smi mask).
- __ movq(object, elements);
- __ RecordWriteHelper(object, index_1, temp);
- __ RecordWriteHelper(elements, index_2, temp);
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index_1,
+ temp,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index_2,
+ temp,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
+
// We are done. Drop elements from the stack, and return undefined.
__ addq(rsp, Immediate(3 * kPointerSize));
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3031,7 +3287,8 @@
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3087,7 +3344,8 @@
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = rax;
@@ -3125,7 +3383,8 @@
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3139,7 +3398,7 @@
__ testl(FieldOperand(rax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ j(zero, if_true);
__ jmp(if_false);
@@ -3147,7 +3406,8 @@
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3163,10 +3423,11 @@
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, return_result, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
@@ -3352,7 +3613,7 @@
// One-character separator case
__ bind(&one_char_separator);
- // Get the separator ascii character value.
+ // Get the separator ASCII character value.
// Register "string" holds the separator.
__ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ Set(index, 0);
@@ -3496,14 +3757,16 @@
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Push(Smi::FromInt(strict_mode_flag()));
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ Push(Smi::FromInt(strict_mode_flag));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ Push(var->name());
@@ -3545,17 +3808,41 @@
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+ } else {
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ } else {
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ }
+ __ bind(&done);
}
break;
}
@@ -3760,9 +4047,9 @@
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3777,9 +4064,9 @@
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3827,20 +4114,25 @@
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(rax, if_true);
@@ -3875,9 +4167,11 @@
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(rax, if_false);
- STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
- Split(above_equal, if_true, if_false, fall_through);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+ __ j(equal, if_true);
+ __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
+ Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
@@ -3895,18 +4189,7 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
@@ -3914,6 +4197,10 @@
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
@@ -3923,20 +4210,13 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
@@ -3945,7 +4225,7 @@
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testq(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
@@ -3959,33 +4239,25 @@
case Token::EQ_STRICT:
case Token::EQ:
cc = equal;
- __ pop(rdx);
break;
case Token::LT:
cc = less;
- __ pop(rdx);
break;
case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = less;
- __ movq(rdx, result_register());
- __ pop(rax);
+ cc = greater;
break;
case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ movq(rdx, result_register());
- __ pop(rax);
+ cc = less_equal;
break;
case Token::GTE:
cc = greater_equal;
- __ pop(rdx);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
+ __ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4005,7 +4277,7 @@
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testq(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
@@ -4017,8 +4289,9 @@
}
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4026,14 +4299,20 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- if (expr->is_strict()) {
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(rax, nil_value);
+ if (expr->op() == Token::EQ_STRICT) {
Split(equal, if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ j(equal, if_true);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(rax, other_nil_value);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// It can be an undetectable object.
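Before moving on to the next file: the EmitLiteralCompareNil code above distinguishes strict from non-strict nil comparisons and also treats undetectable objects as equal to null/undefined in the non-strict case. A standalone C++ sketch of the truth table it implements (illustrative only, not V8 API; it assumes the caller has already classified the value):

#include <cassert>

enum NilValue { kNullValue, kUndefinedValue };

// Truth table behind the generated compare: strict equality accepts only the
// requested nil value; non-strict equality accepts null, undefined, and
// undetectable objects (smis and ordinary objects fall through to false).
bool MatchesNil(bool is_null, bool is_undefined, bool is_undetectable,
                NilValue nil, bool strict) {
  if (strict) return nil == kNullValue ? is_null : is_undefined;
  return is_null || is_undefined || is_undetectable;
}

int main() {
  assert(MatchesNil(true, false, false, kNullValue, true));        // null === null
  assert(!MatchesNil(false, true, false, kNullValue, true));       // undefined === null is false
  assert(MatchesNil(false, true, false, kNullValue, false));       // undefined == null
  assert(MatchesNil(false, false, true, kUndefinedValue, false));  // undetectable == undefined
  return 0;
}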
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9d55594..0632ce4 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -221,7 +221,7 @@
// Update write barrier. Make sure not to clobber the value.
__ movq(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0);
+ __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
@@ -462,30 +462,58 @@
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
__ xor_(rcx, rdi);
- __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
+ int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+ __ and_(rcx, Immediate(mask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys(masm->isolate());
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ movq(rdi, rcx);
+ __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+ __ LoadAddress(kScratchRegister, cache_keys);
+ int off = kPointerSize * i * 2;
+ __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ j(not_equal, &try_next_entry);
+ __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
+ __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
+ __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
- __ j(above_equal, &property_array_property);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ addl(rcx, Immediate(i));
+ }
+ __ LoadAddress(kScratchRegister, cache_field_offsets);
+ __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subq(rdi, rcx);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
// Load in-object property.
+ __ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
__ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
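As a side note on the probe generated above: each hash bucket of the keyed lookup cache now holds several (map, symbol) key pairs that are tried in turn before falling back to the slow path. A rough standalone sketch of that lookup shape, with assumed names and layout rather than V8's actual data structures:

#include <cstddef>

// Assumed, simplified layout: keys[] stores kEntriesPerBucket (map, name)
// pairs per bucket and field_offsets[] is the parallel offsets table.
struct Entry { const void* map; const void* name; };
const int kEntriesPerBucket = 4;  // illustrative value only

// Returns the cached field offset on a hit, or -1 on a miss (the generated
// code jumps to the slow path instead of returning a sentinel).
int ProbeBucket(const Entry* keys, const int* field_offsets,
                int bucket_start, const void* map, const void* name) {
  for (int i = 0; i < kEntriesPerBucket; ++i) {
    const Entry& e = keys[bucket_start + i];
    if (e.map == map && e.name == name) return field_offsets[bucket_start + i];
  }
  return -1;
}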
@@ -531,14 +559,12 @@
Register receiver = rdx;
Register index = rax;
- Register scratch1 = rbx;
- Register scratch2 = rcx;
+ Register scratch = rcx;
Register result = rax;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -606,45 +632,42 @@
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra;
+ Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
+ Label transition_smi_elements, finish_object_store, non_double_value;
+ Label transition_double_elements;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(r9, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow_with_tagged_index);
__ SmiToInteger32(rcx, rcx);
- __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
+ __ CmpInstanceType(r9, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
- __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+ __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow);
- __ CmpInstanceType(rbx, JS_PROXY_TYPE);
- __ j(equal, &slow);
- __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow);
+ // Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
// rcx: index
- __ j(above, &fast);
+ __ j(above, &fast_object_with_map_check);
// Slow case: call runtime.
__ bind(&slow);
@@ -666,9 +689,20 @@
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_extra_double);
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast);
+ __ jmp(&fast_object_without_map_check);
+
+ __ bind(&check_extra_double);
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -678,9 +712,6 @@
// rdx: receiver (a JSArray)
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -688,30 +719,100 @@
__ j(below_equal, &extra);
// Fast case: Do the store.
- __ bind(&fast);
+ __ bind(&fast_object_with_map_check);
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
+ // rdx: receiver (a JSArray)
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &fast_double_with_map_check);
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
Label non_smi_value;
+ __ JumpIfNotSmi(rax, &non_smi_value);
+ // It's irrelevant whether the array is smi-only or not when writing a smi.
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
__ ret(0);
+
__ bind(&non_smi_value);
- // Slow case that needs to retain rcx for use by RecordWrite.
- // Update write barrier for the elements array address.
- __ movq(rdx, rax);
- __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
+ // Writing a non-smi: check whether the array allows non-smi elements.
+ // r9: receiver's map
+ __ CheckFastObjectElements(r9, &transition_smi_elements);
+ __ bind(&finish_object_store);
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ movq(rdx, rax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
+
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ bind(&fast_double_without_map_check);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
+ &transition_double_elements);
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ rbx,
+ rdi,
+ &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+ // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ &slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
}
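The store path above encodes a small transition table for array elements kinds. The following standalone C++ sketch summarizes that decision; the enum names are simplified stand-ins, not V8's types:

#include <cassert>

// Simplified stand-ins for the elements kinds and value classes involved.
enum ElementsKind { FAST_SMI_ONLY, FAST_DOUBLE, FAST_OBJECT };
enum ValueKind { SMI, HEAP_NUMBER, OTHER_HEAP_OBJECT };

// Elements kind required after storing `value` into an array currently of
// kind `kind` (mirrors the jumps above: smi-only + number -> double,
// smi-only + other object -> object, double + non-number -> object;
// smis never force a transition).
ElementsKind KindAfterStore(ElementsKind kind, ValueKind value) {
  if (kind == FAST_SMI_ONLY && value == HEAP_NUMBER) return FAST_DOUBLE;
  if (kind == FAST_SMI_ONLY && value == OTHER_HEAP_OBJECT) return FAST_OBJECT;
  if (kind == FAST_DOUBLE && value == OTHER_HEAP_OBJECT) return FAST_OBJECT;
  return kind;
}

int main() {
  assert(KindAfterStore(FAST_SMI_ONLY, HEAP_NUMBER) == FAST_DOUBLE);
  assert(KindAfterStore(FAST_SMI_ONLY, OTHER_HEAP_OBJECT) == FAST_OBJECT);
  assert(KindAfterStore(FAST_DOUBLE, OTHER_HEAP_OBJECT) == FAST_OBJECT);
  assert(KindAfterStore(FAST_OBJECT, SMI) == FAST_OBJECT);
  return 0;
}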
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rdx : receiver
@@ -721,7 +822,7 @@
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -794,7 +895,7 @@
// The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -821,10 +922,10 @@
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -846,21 +947,22 @@
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
+ // Push the receiver and the name of the function.
+ __ push(rdx);
+ __ push(rcx);
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
+ // Call the entry.
+ CEntryStub stub(1);
+ __ Set(rax, 2);
+ __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+ __ CallStub(&stub);
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- __ LeaveInternalFrame();
+ // Move result to rdi and exit the internal frame.
+ __ movq(rdi, rax);
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -881,7 +983,7 @@
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -913,39 +1015,6 @@
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
@@ -1002,13 +1071,14 @@
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- __ EnterInternalFrame();
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rcx); // save the key
+ __ push(rdx); // pass the receiver
+ __ push(rcx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(rcx); // restore the key
+ }
__ movq(rdi, rax);
__ jmp(&do_call);
@@ -1072,27 +1142,12 @@
__ JumpIfSmi(rcx, &miss);
Condition cond = masm->IsObjectStringType(rcx, rax, rax);
__ j(NegateCondition(cond), &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -1212,7 +1267,12 @@
__ movq(mapped_location, rax);
__ lea(r9, mapped_location);
__ movq(r8, rax);
- __ RecordWrite(rbx, r9, r8);
+ __ RecordWrite(rbx,
+ r9,
+ r8,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
__ Ret();
__ bind(¬in);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1221,7 +1281,12 @@
__ movq(unmapped_location, rax);
__ lea(r9, unmapped_location);
__ movq(r8, rax);
- __ RecordWrite(rbx, r9, r8);
+ __ RecordWrite(rbx,
+ r9,
+ r8,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -1408,11 +1473,10 @@
// -- rsp[0] : return address
// -----------------------------------
//
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
Label miss;
@@ -1434,6 +1498,13 @@
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
@@ -1562,6 +1633,51 @@
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in rax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ movq(rax, rdx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(rbx);
+ __ push(rdx);
+ __ push(rbx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in rax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ movq(rax, rdx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(rbx);
+ __ push(rdx);
+ __ push(rbx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
#undef __
@@ -1573,11 +1689,9 @@
case Token::LT:
return less;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return less;
+ return greater;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return greater_equal;
+ return less_equal;
case Token::GTE:
return greater_equal;
default:
@@ -1609,6 +1723,9 @@
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index b82dc54..2ba2c57 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -67,9 +67,15 @@
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Code generation", chunk());
+ HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -133,7 +139,7 @@
// when called as functions (without an explicit receiver
// object). rcx is zero for method calls and non-zero for function
// calls.
- if (info_->is_strict_mode() || info_->is_native()) {
+ if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
@@ -205,11 +211,8 @@
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
+ // Update the write barrier. This clobbers rax and rbx.
+ __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -260,6 +263,9 @@
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@@ -322,6 +328,12 @@
}
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
@@ -356,7 +368,19 @@
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -492,10 +516,14 @@
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -512,7 +540,6 @@
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -535,7 +562,6 @@
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
- ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
@@ -611,7 +637,7 @@
Safepoint::DeoptMode deopt_mode) {
ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->operands();
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deopt_mode);
@@ -973,11 +999,11 @@
DeoptimizeIf(no_condition, instr->environment());
}
} else if (right->IsStackSlot()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orl(kScratchRegister, ToOperand(right));
DeoptimizeIf(sign, instr->environment());
} else {
// Test the non-zero operand for negative sign.
- __ or_(kScratchRegister, ToRegister(right));
+ __ orl(kScratchRegister, ToRegister(right));
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
@@ -1142,8 +1168,13 @@
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Move(ToRegister(instr->result()), instr->value());
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ Move(ToRegister(instr->result()), value);
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -1192,6 +1223,49 @@
}
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(result));
+ ASSERT(object.is(rax));
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
+ __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
+ if (index->value() == 0) {
+ __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ movq(kScratchRegister, stamp);
+ __ cmpq(kScratchRegister, FieldOperand(object,
+ JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2);
+#ifdef _WIN64
+ __ movq(rcx, object);
+ __ movq(rdx, index, RelocInfo::NONE);
+#else
+ __ movq(rdi, object);
+ __ movq(rsi, index, RelocInfo::NONE);
+#endif
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&done);
+ }
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
@@ -1457,39 +1531,51 @@
}
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (left->IsRegister()) {
- __ cmpl(ToRegister(left), Immediate(value));
- } else {
- __ cmpl(ToOperand(left), Immediate(value));
- }
- } else if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- EmitCmpI(left, right);
- }
-
Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ int32_t value;
+ if (right->IsConstantOperand()) {
+ value = ToInteger32(LConstantOperand::cast(right));
+ __ cmpl(ToRegister(left), Immediate(value));
+ } else if (left->IsConstantOperand()) {
+ value = ToInteger32(LConstantOperand::cast(left));
+ if (right->IsRegister()) {
+ __ cmpl(ToRegister(right), Immediate(value));
+ } else {
+ __ cmpl(ToOperand(right), Immediate(value));
+ }
+ // We transposed the operands. Reverse the condition.
+ cc = ReverseCondition(cc);
+ } else {
+ if (right->IsRegister()) {
+ __ cmpl(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpl(ToRegister(left), ToOperand(right));
+ }
+ }
+ }
+ EmitBranch(true_block, false_block, cc);
+ }
}
@@ -1514,30 +1600,33 @@
}
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
-
int false_block = chunk_->LookupDestination(instr->false_block_id());
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
- // If the expression is known to untagged or smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- // Jump directly to the false block.
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- if (instr->is_strict()) {
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(reg, nil_value);
+ if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, equal);
} else {
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(reg, other_nil_value);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1590,6 +1679,30 @@
}
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+ Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+ return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsString(reg, temp, false_label);
+
+ EmitBranch(true_block, false_block, true_cond);
+}
+
+
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1621,6 +1734,21 @@
}
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = TokenToCondition(op, false);
+ __ testq(rax, rax);
+
+ EmitBranch(true_block, false_block, condition);
+}
+
+
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1684,35 +1812,48 @@
// Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register and possibly input (if it and temp are aliased).
+// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
Register input,
- Register temp) {
- __ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
+ Register temp,
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ JumpIfSmi(input, is_false);
+
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(above_equal, is_true);
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ __ j(equal, is_true);
+ __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+ __ j(equal, is_true);
} else {
- __ j(above_equal, is_false);
+ // Faster code path to avoid two compares: subtract the lower bound from the
+ // actual type and do an unsigned compare against the width of the type range.
+ __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ j(above, is_false);
}
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
if (class_name->IsEqualTo(CStrVector("Object"))) {
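The one-compare range check emitted just above relies on unsigned wraparound after subtracting the lower bound: values below the range become very large unsigned numbers and fail the single bound check. A minimal standalone example with made-up type numbers (not the real instance-type constants):

#include <cassert>
#include <cstdint>

// Tests first <= t <= last with a single compare: after subtracting first,
// in-range values map to [0, last - first] and out-of-range values wrap to
// large unsigned numbers, so one unsigned comparison covers both bounds.
bool InRange(uint8_t t, uint8_t first, uint8_t last) {
  return static_cast<uint8_t>(t - first) <= static_cast<uint8_t>(last - first);
}

int main() {
  const uint8_t kFirst = 0xA0, kLast = 0xA6;  // hypothetical bounds
  assert(InRange(0xA3, kFirst, kLast));       // inside the range
  assert(!InRange(0x9F, kFirst, kLast));      // below: wraps to 0xFF, rejected
  assert(!InRange(0xA7, kFirst, kLast));      // above: rejected
  return 0;
}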
@@ -1741,6 +1882,7 @@
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1749,7 +1891,7 @@
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- EmitClassOfTest(true_label, false_label, class_name, input, temp);
+ EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
EmitBranch(true_block, false_block, equal);
}
@@ -1790,9 +1932,8 @@
virtual void Generate() {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
-
+ virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
-
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -1816,9 +1957,10 @@
Register map = ToRegister(instr->TempAt(0));
__ movq(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- __ movq(kScratchRegister, factory()->the_hole_value(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(map, kScratchRegister); // Patched to cached map.
+ Handle<JSGlobalPropertyCell> cache_cell =
+ factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ cmpq(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -1856,7 +1998,7 @@
InstanceofStub stub(flags);
__ push(ToRegister(instr->InputAt(0)));
- __ Push(instr->function());
+ __ PushHeapObject(instr->function());
static const int kAdditionalDelta = 10;
int delta =
@@ -1900,9 +2042,6 @@
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
Label true_value, done;
__ testq(rax, rax);
__ j(condition, &true_value, Label::kNear);
@@ -1929,14 +2068,8 @@
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- if (result.is(rax)) {
- __ load_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- }
- if (instr->hydrogen()->check_hole_value()) {
+ __ LoadGlobalCell(result, instr->hydrogen()->cell());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
@@ -1956,25 +2089,28 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(!value.is(temp));
- bool check_hole = instr->hydrogen()->check_hole_value();
- if (!check_hole && value.is(rax)) {
- __ store_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- return;
- }
+ Register value = ToRegister(instr->value());
+ Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
- __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- if (check_hole) {
- __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ // We have a temp because CompareRoot might clobber kScratchRegister.
+ Register cell = ToRegister(instr->TempAt(0));
+ ASSERT(!value.is(cell));
+ __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
+ // Store the value.
+ __ movq(Operand(cell, 0), value);
+ } else {
+ // Store the value.
+ __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(Operand(kScratchRegister, 0), value);
}
- __ movq(Operand(temp, 0), value);
+ // Cells are always rescanned, so no write barrier here.
}
@@ -1983,7 +2119,7 @@
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -1994,18 +2130,53 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movq(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ Label is_not_hole;
+ __ j(not_equal, &is_not_hole, Label::kNear);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ bind(&is_not_hole);
+ }
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ movq(ContextOperand(context, instr->slot_index()), value);
- if (instr->needs_write_barrier()) {
+
+ Operand target = ContextOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &skip_assignment);
+ }
+ }
+ __ movq(target, value);
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->TempAt(0));
- __ RecordWrite(context, offset, value, scratch);
+ __ RecordWriteContextSlot(context,
+ offset,
+ value,
+ scratch,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
+
+ __ bind(&skip_assignment);
}
@@ -2025,9 +2196,9 @@
Register object,
Handle<Map> type,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2043,7 +2214,7 @@
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2223,17 +2394,15 @@
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset);
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2305,6 +2474,7 @@
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2373,14 +2543,9 @@
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(rax)); // Used for parameter count.
- ASSERT(function.is(rdi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(rax));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2423,6 +2588,17 @@
__ movq(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(rax)); // Used for parameter count.
+ ASSERT(function.is(rdi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(rax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -2451,7 +2627,7 @@
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
- v8::internal::ParameterCount actual(rax);
+ ParameterCount actual(rax);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2466,7 +2642,7 @@
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2484,6 +2660,14 @@
}
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ __ push(rsi); // The context is the first argument.
+ __ PushHeapObject(instr->hydrogen()->pairs());
+ __ Push(Smi::FromInt(instr->hydrogen()->flags()));
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ movq(result, GlobalObjectOperand());
@@ -2501,34 +2685,47 @@
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- }
-
- // Set rax to arguments count if adaption is not needed. Assumes that rax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ Set(rax, arity);
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(rcx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- }
+ if (can_invoke_directly) {
+ __ LoadHeapObject(rdi, function);
- // Setup deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ }
+
+ // Set rax to arguments count if adaption is not needed. Assumes that rax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ Set(rax, arity);
+ }
+
+ // Invoke function.
+ __ SetCallKind(rcx, call_kind);
+ if (*function == *info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ } else {
+ // We need to adapt arguments.
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
// Restore context.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2537,7 +2734,6 @@
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -2618,6 +2814,7 @@
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -2751,65 +2948,158 @@
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. According to IEEE-754, double-precision
+ // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
+ __ ucomisd(xmm_scratch, input_reg);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &sqrt, Label::kNear);
+ __ j(carry, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ xorps(input_reg, input_reg);
+ __ subsd(input_reg, xmm_scratch);
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
+ __ bind(&done);
}
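
A rough, standalone C++ sketch of the semantics that DoMathPowHalf above implements, kept separate from the patch; PowHalf is an illustrative name, not a V8 function. It mirrors the two special cases called out in the comments: the -Infinity bit-pattern check and the -0 to +0 conversion before the square root.

#include <cmath>
#include <cstdint>
#include <cstring>
#include <cstdio>
#include <limits>

static double PowHalf(double base) {
  // Math.pow(-Infinity, 0.5) must be +Infinity even though sqrt(-Infinity)
  // is NaN, so test the exact -Infinity bit pattern first.
  uint64_t bits;
  std::memcpy(&bits, &base, sizeof(bits));
  if (bits == UINT64_C(0xFFF0000000000000)) {
    return std::numeric_limits<double>::infinity();
  }
  // Adding +0.0 turns -0 into +0, matching the xorps/addsd pair above.
  return std::sqrt(base + 0.0);
}

int main() {
  std::printf("%f\n", PowHalf(-std::numeric_limits<double>::infinity()));  // inf
  std::printf("%f\n", PowHalf(4.0));                                       // 2.000000
  std::printf("%f\n", PowHalf(-0.0));                                      // 0.000000
}
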
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- XMMRegister left_reg = ToDoubleRegister(left);
- ASSERT(!left_reg.is(xmm1));
- LOperand* right = instr->InputAt(1);
- XMMRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers
- __ movaps(xmm0, left_reg);
- ASSERT(ToDoubleRegister(right).is(xmm1));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
- } else if (exponent_type.IsInteger32()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers: xmm0 and edi (not rdi).
- // On Windows, the registers are xmm0 and edx.
- __ movaps(xmm0, left_reg);
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+
+ // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
- ASSERT(ToRegister(right).is(rdx));
+ Register exponent = rdx;
#else
- ASSERT(ToRegister(right).is(rdi));
+ Register exponent = rdi;
#endif
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 2);
- } else {
- ASSERT(exponent_type.IsTagged());
- Register right_reg = ToRegister(right);
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(exponent));
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiToInteger32(right_reg, right_reg);
- __ cvtlsi2sd(xmm1, right_reg);
- __ jmp(&call);
-
- __ bind(&non_smi);
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(exponent, &no_deopt);
+ __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
- __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers xmm0 and xmm1.
- __ movaps(xmm0, left_reg);
- // Right argument is already in xmm1.
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Return value is in xmm0.
- __ movaps(result_reg, xmm0);
- // Restore context register.
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+ class DeferredDoRandom: public LDeferredCode {
+ public:
+ DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LRandom* instr_;
+ };
+
+ DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+
+  // Having marked this instruction as a call, we can use any
+  // registers.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+
+ // Choose the right register for the first argument depending on
+ // calling convention.
+#ifdef _WIN64
+ ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
+ Register global_object = rcx;
+#else
+ ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ Register global_object = rdi;
+#endif
+
+ static const int kSeedSize = sizeof(uint32_t);
+ STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
+
+ __ movq(global_object,
+ FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
+ static const int kRandomSeedOffset =
+ FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
+ // rbx: FixedArray of the global context's random seeds
+
+ // Load state[0].
+ __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
+ // If state[0] == 0, call runtime to initialize seeds.
+ __ testl(rax, rax);
+ __ j(zero, deferred->entry());
+ // Load state[1].
+ __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+
+ // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+ // Only operate on the lower 32 bit of rax.
+ __ movl(rdx, rax);
+ __ andl(rdx, Immediate(0xFFFF));
+ __ imull(rdx, rdx, Immediate(18273));
+ __ shrl(rax, Immediate(16));
+ __ addl(rax, rdx);
+ // Save state[0].
+ __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+
+ // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+ __ movl(rdx, rcx);
+ __ andl(rdx, Immediate(0xFFFF));
+ __ imull(rdx, rdx, Immediate(36969));
+ __ shrl(rcx, Immediate(16));
+ __ addl(rcx, rdx);
+ // Save state[1].
+ __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+
+ // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+ __ shll(rax, Immediate(14));
+ __ andl(rcx, Immediate(0x3FFFF));
+ __ addl(rax, rcx);
+
+ __ bind(deferred->exit());
+ // Convert 32 random bits in rax to 0.(32 random bits) in a double
+ // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+ __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm2, rcx);
+ __ movd(xmm1, rax);
+ __ cvtss2sd(xmm2, xmm2);
+ __ xorps(xmm1, xmm2);
+ __ subsd(xmm1, xmm2);
+}
+
+
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Return value is in rax.
}
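
The seed-update formulas spelled out in the comments of DoRandom above are two 16-bit multiply-with-carry steps whose outputs are combined into 32 random bits and then mapped to [0, 1) with the 2^20 bit trick. A rough, standalone C++ sketch of that sequence follows; RandomState and NextRandom are illustrative names, not part of V8, and the seed handling is simplified to "must be non-zero".

#include <cstdint>
#include <cstring>
#include <cstdio>

struct RandomState { uint32_t s0, s1; };  // state[0] and state[1] above.

static double NextRandom(RandomState* st) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  st->s0 = 18273 * (st->s0 & 0xFFFF) + (st->s0 >> 16);
  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  st->s1 = 36969 * (st->s1 & 0xFFFF) + (st->s1 >> 16);
  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  uint32_t bits = (st->s0 << 14) + (st->s1 & 0x3FFFF);
  // Build 1.(20 0s)(32 random bits) x 2^20 by XOR-ing the random bits into
  // the low mantissa of 2^20, then subtract 1.0 x 2^20 to get bits / 2^32.
  uint64_t two_pow_20 = UINT64_C(0x4130000000000000);  // 2^20 as a double.
  uint64_t combined = two_pow_20 ^ bits;
  double biased;
  std::memcpy(&biased, &combined, sizeof(biased));
  return biased - 1048576.0;  // 1048576.0 == 2^20, result is in [0, 1).
}

int main() {
  RandomState st = {1, 2};  // Non-zero seeds, as the runtime check requires.
  for (int i = 0; i < 3; ++i) std::printf("%f\n", NextRandom(&st));
}
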
@@ -2821,6 +3111,14 @@
}
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2860,6 +3158,9 @@
case kMathSin:
DoMathSin(instr);
break;
+ case kMathTan:
+ DoMathTan(instr);
+ break;
case kMathLog:
DoMathLog(instr);
break;
@@ -2909,13 +3210,13 @@
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1);
}
@@ -2933,7 +3234,6 @@
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -2942,9 +3242,9 @@
ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(rax, instr->arity());
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -2963,21 +3263,36 @@
}
// Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, offset, value, temp);
+ __ RecordWriteField(object,
+ offset,
+ value,
+ temp,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWrite(temp, offset, value, object);
+ __ RecordWriteField(temp,
+ offset,
+ value,
+ object,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
}
@@ -2988,7 +3303,7 @@
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3025,6 +3340,7 @@
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3036,17 +3352,25 @@
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->index()->IsConstantOperand()) {
- if (instr->length()->IsRegister()) {
- __ cmpq(ToRegister(instr->length()),
+ if (instr->length()->IsRegister()) {
+ Register reg = ToRegister(instr->length());
+ if (FLAG_debug_code) {
+ __ AbortIfNotZeroExtended(reg);
+ }
+ if (instr->index()->IsConstantOperand()) {
+ __ cmpq(reg,
Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
} else {
- __ cmpq(ToOperand(instr->length()),
- Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+ Register reg2 = ToRegister(instr->index());
+ if (FLAG_debug_code) {
+ __ AbortIfNotZeroExtended(reg2);
+ }
+ __ cmpq(reg, reg2);
}
} else {
- if (instr->length()->IsRegister()) {
- __ cmpq(ToRegister(instr->length()), ToRegister(instr->index()));
+ if (instr->index()->IsConstantOperand()) {
+ __ cmpq(ToOperand(instr->length()),
+ Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
} else {
__ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
}
@@ -3076,12 +3400,20 @@
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
+ __ RecordWrite(elements,
+ key,
+ value,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3110,13 +3442,54 @@
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ Label not_applicable;
+ __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+ __ j(not_equal, ¬_applicable);
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ // Write barrier.
+ ASSERT_NE(instr->temp_reg(), NULL);
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(rdx));
+ ASSERT(new_map_reg.is(rbx));
+ __ movq(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(rdx));
+ ASSERT(new_map_reg.is(rbx));
+ __ movq(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(¬_applicable);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
@@ -3131,85 +3504,19 @@
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- // Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ testb(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, deferred->entry());
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, deferred->entry());
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzxwl(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ movzxbl(result, FieldOperand(string,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&done);
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -3251,6 +3558,7 @@
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3327,6 +3635,7 @@
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3385,6 +3694,7 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env) {
Label load_smi, done;
@@ -3412,6 +3722,15 @@
}
// Heap number to XMM conversion.
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, result_reg);
+ __ j(not_equal, &done, Label::kNear);
+ __ movmskpd(kScratchRegister, result_reg);
+ __ testq(kScratchRegister, Immediate(1));
+ DeoptimizeIf(not_zero, env);
+ }
__ jmp(&done, Label::kNear);
// Smi to XMM conversion
@@ -3422,16 +3741,6 @@
}
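
A rough, standalone C++ sketch of the minus-zero test added to EmitNumberUntagD above; IsMinusZero is an illustrative name, not a V8 helper. Because +0.0 and -0.0 compare equal, the equality check against zero has to be followed by a sign-bit test, which is what the movmskpd/testq pair in the generated code performs.

#include <cstdint>
#include <cstring>
#include <cstdio>

static bool IsMinusZero(double value) {
  if (value != 0.0) return false;  // ucomisd against a zeroed xmm register.
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return (bits >> 63) != 0;        // movmskpd extracts exactly this sign bit.
}

int main() {
  std::printf("%d %d %d\n",
              IsMinusZero(-0.0), IsMinusZero(0.0), IsMinusZero(-1.0));  // 1 0 0
}
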
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@@ -3480,6 +3789,16 @@
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -3503,6 +3822,7 @@
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment());
}
@@ -3608,20 +3928,37 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ Cmp(reg, instr->hydrogen()->target());
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ cmpq(reg, Operand(kScratchRegister, 0));
+ } else {
+ __ Cmp(reg, target);
+ }
DeoptimizeIf(not_equal, instr->environment());
}
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Handle<Map> map,
+ CompareMapMode mode,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMap(reg, map, &success, mode);
+ DeoptimizeIf(not_equal, env);
+ __ bind(&success);
+}
+
+
void LCodeGen::DoCheckMap(LCheckMap* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- instr->hydrogen()->map());
- DeoptimizeIf(not_equal, instr->environment());
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}
@@ -3676,18 +4013,6 @@
}
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- } else {
- __ Move(result, object);
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@@ -3695,32 +4020,139 @@
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+}
+
+
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->TempAt(0));
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ // Allocate memory for the object. The initial map might change when
+ // the constructor's prototype changes, but instance size and property
+ // counts remain unchanged (if slack tracking finished).
+ ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+ __ AllocateInNewSpace(instance_size,
+ result,
+ no_reg,
+ scratch,
+ deferred->entry(),
+ TAG_OBJECT);
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(scratch, constructor);
+ __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(map);
+ __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+ Immediate(instance_size >> kPointerSizeLog2));
+ __ Assert(equal, "Unexpected instance size");
+ __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+ Immediate(initial_map->pre_allocated_property_fields()));
+ __ Assert(equal, "Unexpected pre-allocated property fields count");
+ __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+ Immediate(initial_map->unused_property_fields()));
+ __ Assert(equal, "Unexpected unused property fields count");
+ __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+ Immediate(initial_map->inobject_properties()));
+ __ Assert(equal, "Unexpected in-object property fields count");
+ }
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ movq(FieldOperand(result, JSObject::kMapOffset), map);
+ __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ if (initial_map->inobject_properties() != 0) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ movq(FieldOperand(result, property_offset), scratch);
+ }
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ PushSafepointRegistersScope scope(this);
+ __ PushHeapObject(constructor);
+ CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+ __ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- // Setup the parameters to the stub/runtime call.
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(rbx, Immediate(Map::kElementsKindMask));
+ __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
+ Map::kElementsKindShift));
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
+ // Set up the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_elements());
+  // Boilerplate already exists; constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -3736,26 +4168,160 @@
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- // Setup the parameters to the stub/runtime call.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_properties());
- __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(rcx));
+ ASSERT(!result.is(rcx));
- // Pick the right runtime function to call.
+ // Only elements backing stores for non-COW arrays need to be copied.
+ Handle<FixedArrayBase> elements(object->elements());
+ bool has_elements = elements->length() > 0 &&
+ elements->map() != isolate()->heap()->fixed_cow_array_map();
+
+ // Increase the offset so that subsequent objects end up right after
+ // this object and its backing store.
+ int object_offset = *offset;
+ int object_size = object->map()->instance_size();
+ int elements_offset = *offset + object_size;
+ int elements_size = has_elements ? elements->Size() : 0;
+ *offset += object_size + elements_size;
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = object_size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ if (has_elements && i == JSObject::kElementsOffset) {
+ __ lea(rcx, Operand(result, elements_offset));
+ } else {
+ __ movq(rcx, FieldOperand(source, i));
+ }
+ __ movq(FieldOperand(result, object_offset + i), rcx);
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(rcx, Operand(result, *offset));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ } else {
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ }
+
+ if (has_elements) {
+ // Copy elements backing store header.
+ __ LoadHeapObject(source, elements);
+ for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+ __ movq(rcx, FieldOperand(source, i));
+ __ movq(FieldOperand(result, elements_offset + i), rcx);
+ }
+
+ // Copy elements backing store content.
+ int elements_length = elements->length();
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(rcx, Operand(result, *offset));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ } else {
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+
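
A rough, standalone C++ sketch of the offset bookkeeping in EmitDeepCopy above: each object is copied at the current offset, a non-COW elements store is placed immediately after it, and *offset advances past both so subsequent objects land behind their parent; DoFastLiteral then allocates total_size() once and expects the final offset to match. The recursion is flattened into a loop here, LiteralObject and PlanDeepCopy are illustrative names, and the sizes in main() are made up, not real V8 layout values.

#include <cstdio>

struct LiteralObject {
  int instance_size;  // object->map()->instance_size()
  int elements_size;  // 0 when the backing store is empty or copy-on-write
};

static void PlanDeepCopy(const LiteralObject* objs, int count, int* offset) {
  for (int i = 0; i < count; ++i) {
    int object_offset = *offset;
    int elements_offset = *offset + objs[i].instance_size;
    *offset += objs[i].instance_size + objs[i].elements_size;
    std::printf("object %d at %d, elements at %d\n",
                i, object_offset, elements_offset);
  }
}

int main() {
  LiteralObject graph[] = {{32, 40}, {24, 0}, {24, 0}};  // toy boilerplate graph
  int offset = 0;
  PlanDeepCopy(graph, 3, &offset);
  std::printf("total size to allocate: %d\n", offset);
}
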
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ Push(Smi::FromInt(size));
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
+ ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
+ // Set up the parameters to the stub/runtime call.
+ __ PushHeapObject(literals);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(constant_properties);
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= instr->hydrogen()->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Push(Smi::FromInt(flags));
+
+ // Pick the right runtime function or stub to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -3825,8 +4391,7 @@
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(shared_info->language_mode());
__ Push(shared_info);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -3850,7 +4415,12 @@
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(operand)));
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
@@ -3866,12 +4436,11 @@
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
+ Condition final_branch_condition =
+ EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ if (final_branch_condition != no_condition) {
+ EmitBranch(true_block, false_block, final_branch_condition);
+ }
}
@@ -3916,9 +4485,12 @@
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
- final_branch_condition = above_equal;
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+ __ j(equal, true_label);
+ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -3936,7 +4508,6 @@
final_branch_condition = zero;
} else {
- final_branch_condition = never;
__ jmp(false_label);
}
@@ -3978,11 +4549,7 @@
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- while (padding_size > 0) {
- int nop_size = padding_size > 9 ? 9 : padding_size;
- __ nop(nop_size);
- padding_size -= nop_size;
- }
+ __ Nop(padding_size);
}
}
@@ -4051,6 +4618,7 @@
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
@@ -4106,6 +4674,88 @@
osr_pc_offset_ = masm()->pc_offset();
}
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+
+ Register null_value = rdi;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmpq(rax, null_value);
+ DeoptimizeIf(equal, instr->environment());
+
+ Condition cc = masm()->CheckSmi(rax);
+ DeoptimizeIf(cc, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+ DeoptimizeIf(below_equal, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ jmp(&use_cache, Label::kNear);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(rax);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kMetaMapRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ __ LoadInstanceDescriptors(map, result);
+ __ movq(result,
+ FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ __ movq(result,
+ FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ Condition cc = masm()->CheckSmi(result);
+ DeoptimizeIf(cc, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ __ cmpq(ToRegister(instr->map()),
+ FieldOperand(object, HeapObject::kMapOffset));
+ DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+
+ Label out_of_object, done;
+ __ SmiToInteger32(index, index);
+ __ cmpl(index, Immediate(0));
+ __ j(less, &out_of_object);
+ __ movq(object, FieldOperand(object,
+ index,
+ times_pointer_size,
+ JSObject::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&out_of_object);
+ __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ negl(index);
+  // Index is now equal to the out-of-object property index plus 1.
+ __ movq(object, FieldOperand(object,
+ index,
+ times_pointer_size,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(&done);
+}
+
+
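
A rough, standalone C++ sketch of the index encoding that DoLoadFieldByIndex above decodes: a non-negative index selects the index-th in-object field, while a negative index -n selects slot n - 1 of the out-of-object properties array (hence the "plus 1" comment and the "- kPointerSize" adjustment). FakeObject and LoadFieldByIndex are illustrative names, not V8 types.

#include <cstdio>
#include <vector>

struct FakeObject {
  std::vector<int> in_object_fields;     // stored inside the object itself
  std::vector<int> out_of_object_props;  // stored in the properties array
};

static int LoadFieldByIndex(const FakeObject& obj, int index) {
  if (index >= 0) {
    return obj.in_object_fields[index];
  }
  int props_index = -index - 1;  // negate, then drop the +1 bias.
  return obj.out_of_object_props[props_index];
}

int main() {
  FakeObject obj = {{10, 11, 12}, {20, 21}};
  std::printf("%d\n", LoadFieldByIndex(obj, 1));   // 11: in-object field 1
  std::printf("%d\n", LoadFieldByIndex(obj, -2));  // 21: properties slot 1
}
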
#undef __
} } // namespace v8::internal
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 43c045f..f5045b6 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -78,6 +78,7 @@
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -96,12 +97,17 @@
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- // Parallel move support.
+ void DoCheckMapCommon(Register reg, Handle<Map> map,
+ CompareMapMode mode, LEnvironment* env);
+
+  // Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@@ -126,8 +132,8 @@
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- int strict_mode_flag() const {
- return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+ StrictModeFlag strict_mode_flag() const {
+ return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -140,7 +146,8 @@
Label* if_false,
Handle<String> class_name,
Register input,
- Register temporary);
+ Register temporary,
+ Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
@@ -189,15 +196,13 @@
int argc,
LInstruction* instr);
-
// Generate a direct call to a known function. Expects the function
- // to be in edi.
+ // to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
@@ -230,6 +235,7 @@
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
@@ -248,17 +254,19 @@
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -267,6 +275,13 @@
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
@@ -280,6 +295,13 @@
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
@@ -346,16 +368,20 @@
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
- void SetExit(Label *exit) { external_exit_ = exit; }
+ void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -366,6 +392,7 @@
Label entry_;
Label exit_;
Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index c3c617c..877ea8c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -198,16 +198,18 @@
if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ Move(dst, cgen_->ToHandle(constant_source));
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
if (cgen_->IsInteger32Constant(constant_source)) {
- // Allow top 32 bits of an untagged Integer32 to be arbitrary.
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
+ // value.
+ __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ Move(dst, cgen_->ToHandle(constant_source));
+ __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+ __ movq(dst, kScratchRegister);
}
}
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 5fc5646..593e778 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -214,10 +214,11 @@
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -229,6 +230,13 @@
}
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
InputAt(0)->PrintTo(stream);
@@ -243,6 +251,14 @@
}
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(0)->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -366,7 +382,7 @@
void LChunk::MarkEmptyBlocks() {
- HPhase phase("Mark empty blocks", this);
+ HPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
@@ -446,8 +462,14 @@
}
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new LInstructionGap(block);
+ LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -522,8 +544,8 @@
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
- HPhase phase("Building chunk", chunk_);
+ chunk_ = new(zone()) LChunk(info(), graph());
+ HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -552,20 +574,15 @@
}
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ XMMRegister::ToAllocationIndex(reg));
}
@@ -580,29 +597,29 @@
LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
LUnallocated::USED_AT_START));
}
LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
}
LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
LUnallocated::USED_AT_START));
}
@@ -638,7 +655,7 @@
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
}
@@ -647,7 +664,7 @@
HInstruction* instr = HInstruction::cast(value);
VisitInstruction(instr);
}
- allocator_->RecordUse(value, operand);
+ operand->set_virtual_register(value->id());
return operand;
}
@@ -655,22 +672,17 @@
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
+ result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
return instr;
}
template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
@@ -678,14 +690,16 @@
LInstruction* LChunkBuilder::DefineAsSpilled(
LTemplateInstruction<1, I, T>* instr,
int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
@@ -706,7 +720,9 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -736,7 +752,7 @@
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -748,7 +764,8 @@
// Thus we still need to attach environment to this call even if
// call sequence can not deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
@@ -765,66 +782,46 @@
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_));
return instr;
}
LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ operand->set_virtual_register(allocator_->GetVirtualRegister());
+ if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
+ return new(zone()) LLabel(instr->block());
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -836,7 +833,7 @@
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
+ LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -870,7 +867,7 @@
}
LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
@@ -883,7 +880,7 @@
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -901,7 +898,8 @@
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, rdx);
LOperand* right_operand = UseFixed(right, rax);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -989,20 +987,26 @@
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber ||
+ hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
- int argument_index = 0;
+ LEnvironment* result = new(zone()) LEnvironment(
+ hydrogen_env->closure(),
+ hydrogen_env->frame_type(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1011,57 +1015,70 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- ASSERT(v->IsConstant());
- ASSERT(!v->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ HValue* value = instr->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
- return new LGoto(successor->block_id());
+ return new(zone()) LGoto(successor->block_id());
}
- return AssignEnvironment(new LBranch(UseRegister(v)));
+
+ LBranch* result = new(zone()) LBranch(UseRegister(value));
+ // Tagged values that are not known smis or booleans require a
+ // deoptimization environment.
+ Representation rep = value->representation();
+ HType type = value->type();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return new LCmpMapAndBranch(value);
+ return new(zone()) LCmpMapAndBranch(value);
}
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+ return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left = UseFixed(instr->left(), rax);
LOperand* right = UseFixed(instr->right(), rdx);
- LInstanceOf* result = new LInstanceOf(left, right);
+ LInstanceOf* result = new(zone()) LInstanceOf(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1069,18 +1086,26 @@
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
- FixedTemp(rdi));
+ new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
+ FixedTemp(rdi));
return MarkAsCall(DefineFixed(result, rax), instr);
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
LOperand* receiver = UseFixed(instr->receiver(), rax);
LOperand* length = UseFixed(instr->length(), rbx);
LOperand* elements = UseFixed(instr->elements(), rcx);
- LApplyArguments* result = new LApplyArguments(function,
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
receiver,
length,
elements);
@@ -1091,61 +1116,68 @@
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = UseOrConstant(instr->argument());
- return new LPushArgument(argument);
+ return new(zone()) LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
}
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
}
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
+ return DefineAsRegister(new(zone()) LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ return MarkAsCall(new(zone()) LDeclareGlobals, instr);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
+ return DefineAsRegister(new(zone()) LGlobalObject);
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
+ return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1169,47 +1201,48 @@
ASSERT(instr->key()->representation().IsTagged());
LOperand* key = UseFixed(instr->key(), rcx);
argument_count_ -= instr->argument_count();
- LCallKeyed* result = new LCallKeyed(key);
+ LCallKeyed* result = new(zone()) LCallKeyed(key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), rdi);
argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction();
+ LCallFunction* result = new(zone()) LCallFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
}
@@ -1228,8 +1261,24 @@
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new(zone()) LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
}
@@ -1237,21 +1286,11 @@
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new LBitNotI(input);
+ LBitNotI* result = new(zone()) LBitNotI(input);
return DefineSameAsFirst(result);
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1261,7 +1300,7 @@
LOperand* temp = FixedTemp(rdx);
LOperand* dividend = UseFixed(instr->left(), rax);
LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new LDivI(dividend, divisor, temp);
+ LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1279,7 +1318,8 @@
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+ LModI* mod =
+ new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
result = DefineSameAsFirst(mod);
} else {
// The temporary operand is necessary to ensure that right is not
@@ -1287,7 +1327,7 @@
LOperand* temp = FixedTemp(rdx);
LOperand* value = UseFixed(instr->left(), rax);
LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
+ LModI* mod = new(zone()) LModI(value, divisor, temp);
result = DefineFixed(mod, rdx);
}
@@ -1304,7 +1344,7 @@
// TODO(fschneider): Allow any register as input registers.
LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
}
@@ -1316,8 +1356,12 @@
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LMulI* mul = new LMulI(left, right);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ LMulI* mul = new(zone()) LMulI(left, right);
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineSameAsFirst(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
@@ -1333,7 +1377,7 @@
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
+ LSubI* sub = new(zone()) LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1354,7 +1398,7 @@
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
+ LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineSameAsFirst(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1384,20 +1428,31 @@
#else
UseFixed(instr->right(), rdi);
#endif
- LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+ LPower* result = new(zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
}
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->global_object()->representation().IsTagged());
+#ifdef _WIN64
+ LOperand* global_object = UseFixed(instr->global_object(), rcx);
+#else
+ LOperand* global_object = UseFixed(instr->global_object(), rdi);
+#endif
+ LRandom* result = new(zone()) LRandom(global_object);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
- LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
- LCmpT* result = new LCmpT(left, right);
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LCmpT* result = new(zone()) LCmpT(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1408,16 +1463,23 @@
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ return new(zone()) LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ LOperand* left;
+ LOperand* right;
+ if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+ left = UseRegisterOrConstantAtStart(instr->left());
+ right = UseRegisterOrConstantAtStart(instr->right());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return new(zone()) LCmpIDAndBranch(left, right);
}
}
@@ -1426,47 +1488,72 @@
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new LCmpObjectEqAndBranch(left, right);
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpConstantEqAndBranch(value);
}
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+ LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+ return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
+ return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(value, temp);
}
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(instr->value()));
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
- TempRegister());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsUndetectableAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(left, right);
+
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value);
}
@@ -1475,54 +1562,64 @@
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGetCachedArrayIndex(value));
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasCachedArrayIndexAndBranch(value);
}
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
- return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
- TempRegister());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LClassOfTestAndBranch(value,
+ TempRegister(),
+ TempRegister());
}
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
+ return DefineAsRegister(new(zone()) LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
HFixedArrayBaseLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayBaseLength(array));
+ return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LElementsKind(object));
+ return DefineAsRegister(new(zone()) LElementsKind(object));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object);
- return AssignEnvironment(DefineSameAsFirst(result));
+ LValueOf* result = new(zone()) LValueOf(object);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), rax);
+ LDateField* result = new LDateField(object, instr->index());
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(
- UseRegisterOrConstantAtStart(instr->index()),
- Use(instr->length())));
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = Use(instr->length());
+ return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
@@ -1535,7 +1632,7 @@
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(value), instr);
}
@@ -1558,7 +1655,7 @@
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1567,10 +1664,10 @@
if (needs_check) {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+ LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
- return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
@@ -1580,26 +1677,27 @@
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp);
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+ return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
+ return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else {
- LNumberTagI* result = new LNumberTagI(value);
+ LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
- return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+ LOperand* value = Use(instr->value());
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
UNREACHABLE();
@@ -1609,39 +1707,39 @@
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LCheckInstanceType* result = new LCheckInstanceType(value);
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMap* result = new LCheckMap(value);
+ LCheckMap* result = new(zone()) LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1651,62 +1749,36 @@
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new LClampDToUint8(reg,
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg,
TempRegister()));
} else if (input_rep.IsInteger32()) {
- return DefineSameAsFirst(new LClampIToUint8(reg));
+ return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
- LClampTToUint8* result = new LClampTToUint8(reg,
- TempRegister(),
- FixedTemp(xmm1));
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
+ TempRegister(),
+ FixedTemp(xmm1));
return AssignEnvironment(DefineSameAsFirst(result));
}
}
-LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
- } else if (input_rep.IsInteger32()) {
- // Canonicalization should already have removed the hydrogen instruction in
- // this case, since it is a noop.
- UNREACHABLE();
- return NULL;
- } else {
- ASSERT(input_rep.IsTagged());
- LOperand* reg = UseRegister(value);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* xmm_temp =
- CpuFeatures::IsSupported(SSE3)
- ? NULL
- : FixedTemp(xmm1);
- return AssignEnvironment(
- DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
- }
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), rax));
+ return new(zone()) LReturn(UseFixed(instr->value(), rax));
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
+ return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
LOperand* temp = TempRegister();
- return DefineAsRegister(new LConstantD(temp));
+ return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
+ return DefineAsRegister(new(zone()) LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1715,8 +1787,8 @@
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1724,29 +1796,35 @@
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), rax);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
- return instr->check_hole_value() ? AssignEnvironment(result) : result;
+ LOperand* value = UseRegister(instr->value());
+ // Use a temp to avoid reloading the cell value address in the case where
+ // we perform a hole check.
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+ : new(zone()) LStoreGlobalCell(value, NULL);
}
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), rdx);
LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result = new LStoreGlobalGeneric(global_object, value);
+ LStoreGlobalGeneric* result = new(zone()) LStoreGlobalGeneric(global_object,
+ value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1763,14 +1841,15 @@
value = UseRegister(instr->value());
temp = NULL;
}
- return new LStoreContextSlot(context, value, temp);
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new LLoadNamedField(obj));
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -1779,11 +1858,13 @@
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), rax);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(obj);
return MarkAsCall(DefineFixed(result, rax), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
@@ -1791,7 +1872,7 @@
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), rax);
- LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1799,20 +1880,20 @@
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()))));
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
+ return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
+ return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
}
@@ -1822,8 +1903,9 @@
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineAsRegister(result));
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
}
@@ -1834,7 +1916,7 @@
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
- new LLoadKeyedFastDoubleElement(elements, key);
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1842,19 +1924,18 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() &&
+ (instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
@@ -1867,7 +1948,7 @@
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rax);
- LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+ LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1886,8 +1967,7 @@
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
}
@@ -1901,19 +1981,18 @@
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new LStoreKeyedFastDoubleElement(elements, key, val);
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() &&
+ (instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -1928,9 +2007,9 @@
: UseRegister(instr->value());
LOperand* key = UseRegisterOrConstant(instr->key());
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -1943,11 +2022,35 @@
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
+ LStoreKeyedGeneric* result =
+ new(zone()) LStoreKeyedGeneric(object, key, value);
return MarkAsCall(result, instr);
}
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LOperand* temp_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), rax);
+ LOperand* fixed_object_reg = FixedTemp(rdx);
+ LOperand* new_map_reg = FixedTemp(rbx);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object,
+ new_map_reg,
+ fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -1964,7 +2067,7 @@
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
? TempRegister() : NULL;
- return new LStoreNamedField(obj, val, temp);
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -1972,7 +2075,7 @@
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* value = UseFixed(instr->value(), rax);
- LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
+ LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
return MarkAsCall(result, instr);
}
@@ -1980,55 +2083,67 @@
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* left = UseOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
+ return DefineAsRegister(new(zone()) LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ LAllocateObject* result = new LAllocateObject(TempRegister());
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+ return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
}
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
}
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LDeleteProperty* result =
- new LDeleteProperty(UseAtStart(instr->object()),
- UseOrConstantAtStart(instr->key()));
+ LOperand* object = UseAtStart(instr->object());
+ LOperand* key = UseOrConstantAtStart(instr->key());
+ LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2036,13 +2151,13 @@
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
+ return AssignEnvironment(new(zone()) LOsrEntry);
}
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
+ return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
@@ -2052,13 +2167,13 @@
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallStub, rax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
}
@@ -2075,32 +2190,33 @@
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ LAccessArgumentsAt* result =
+ new(zone()) LAccessArgumentsAt(arguments, length, index);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LOperand* object = UseFixed(instr->value(), rax);
- LToFastProperties* result = new LToFastProperties(object);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+ LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
- return new LIsConstructCallAndBranch(TempRegister());
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
}
@@ -2123,7 +2239,7 @@
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LLazyBailout* lazy_bailout = new LLazyBailout;
+ LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
@@ -2137,10 +2253,10 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new LStackCheck, instr);
+ return MarkAsCall(new(zone()) LStackCheck, instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
}
}
@@ -2149,9 +2265,14 @@
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind());
+ instr->call_kind(),
+ instr->is_construct());
+ if (instr->arguments() != NULL) {
+ inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+ }
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2159,7 +2280,8 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
@@ -2168,11 +2290,39 @@
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
LOperand* key = UseOrConstantAtStart(instr->key());
LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new LIn(key, object);
+ LIn* result = new(zone()) LIn(key, object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* object = UseFixed(instr->enumerable(), rax);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
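
Note on the allocation pattern running through the builder changes above: every bare `new LFoo(...)` becomes the placement form `new(zone()) LFoo(...)`, so lithium instructions, gaps, pointer maps and environments are carved out of the compilation zone and reclaimed in bulk instead of being deleted one by one. The sketch below only illustrates that placement-new-into-an-arena idiom under simplified assumptions; `SimpleArena` and `Node` are made-up names for the example and are not V8's actual Zone API.

// Illustrative sketch: a minimal bump-pointer arena plus a placement
// operator new, mirroring the shape of the new(zone()) calls in the diff
// above. SimpleArena and Node are hypothetical, not V8 classes.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <vector>

class SimpleArena {
 public:
  ~SimpleArena() {
    // Everything allocated here dies together; objects are never deleted
    // individually, matching how lithium instructions are discarded in bulk.
    for (size_t i = 0; i < blocks_.size(); ++i) std::free(blocks_[i]);
  }

  // Bump-allocate |size| bytes, 8-byte aligned. Assumes size <= kBlockSize.
  void* Allocate(size_t size) {
    size = (size + 7) & ~static_cast<size_t>(7);
    if (blocks_.empty() || used_ + size > kBlockSize) {
      blocks_.push_back(std::malloc(kBlockSize));  // start a fresh block
      used_ = 0;
    }
    void* result = static_cast<char*>(blocks_.back()) + used_;
    used_ += size;
    return result;
  }

 private:
  static constexpr size_t kBlockSize = 8 * 1024;
  std::vector<void*> blocks_;
  size_t used_ = 0;
};

// Placement allocation function: "new(&arena) T(...)" draws storage from
// the arena instead of the global heap.
inline void* operator new(size_t size, SimpleArena* arena) {
  return arena->Allocate(size);
}
// Matching placement delete; only invoked if a constructor throws.
inline void operator delete(void*, SimpleArena*) {}

struct Node {
  explicit Node(int id) : id(id) {}
  int id;
};

int main() {
  SimpleArena arena;
  Node* n = new(&arena) Node(42);  // same shape as new(zone()) LGoto(...)
  std::printf("node id: %d\n", n->id);
  return 0;  // arena destructor frees all blocks at once
}

Read this way, tearing down a chunk amounts to destroying one arena, which is consistent with none of the Do* methods above pairing their `new(zone())` calls with deletes.
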
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index d169bf6..2d8fd2e 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,6 +49,7 @@
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -87,11 +88,13 @@
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
+ V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -107,10 +110,12 @@
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -138,6 +143,7 @@
V(Parameter) \
V(Power) \
V(PushArgument) \
+ V(Random) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
@@ -162,11 +168,18 @@
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
- V(ValueOf)
+ V(ValueOf) \
+ V(ForInPrepareMap) \
+ V(ForInCacheArray) \
+ V(CheckMapValue) \
+ V(LoadFieldByIndex) \
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -457,6 +470,20 @@
};
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
@@ -609,17 +636,18 @@
};
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
public:
- LIsNullAndBranch(LOperand* value, LOperand* temp) {
+ LIsNilAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
- bool is_strict() const { return hydrogen()->is_strict(); }
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -638,6 +666,20 @@
};
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -666,6 +708,23 @@
};
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+ explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -705,11 +764,12 @@
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
+ temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -790,18 +850,15 @@
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -946,6 +1003,22 @@
};
+class LDateField: public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDateField(LOperand* date, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ Smi* index() const { return index_; }
+
+ private:
+ Smi* index_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -990,6 +1063,17 @@
};
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRandom(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+ DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1206,6 +1290,8 @@
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+ LOperand* value() { return inputs_[0]; }
};
@@ -1223,7 +1309,7 @@
LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(1); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1257,7 +1343,6 @@
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1274,7 +1359,9 @@
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@@ -1296,6 +1383,13 @@
};
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1372,14 +1466,17 @@
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
- LCallFunction() {}
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1548,7 +1645,6 @@
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1568,7 +1664,7 @@
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1653,7 +1749,31 @@
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1719,6 +1839,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@@ -1821,6 +1943,24 @@
};
+class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LAllocateObject(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
+};
+
+
class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -1948,6 +2088,62 @@
};
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInPrepareMap(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@@ -2021,6 +2217,7 @@
: chunk_(NULL),
info_(info),
graph_(graph),
+ zone_(graph->isolate()->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2050,6 +2247,7 @@
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2059,7 +2257,6 @@
void Abort(const char* format, ...);
// Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
@@ -2110,8 +2307,6 @@
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2146,12 +2341,12 @@
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
@@ -2161,6 +2356,7 @@
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
+ Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8fcad23..f7db250 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,6 +44,7 @@
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
+ has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -54,7 +55,7 @@
static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_address());
+ reinterpret_cast<Address>(isolate->heap()->roots_array_start());
intptr_t delta = other.address() - roots_register_value;
return delta;
}
@@ -196,28 +197,47 @@
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register addr,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then) {
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
}
-
- // Compute the page start address from the heap object pointer, and reuse
- // the 'object' register for it.
- and_(object, Immediate(~Page::kPageAlignmentMask));
-
- // Compute number of region covering addr. See Page::GetRegionNumberForAddress
- // method for more details.
- shrl(addr, Immediate(Page::kRegionSizeLog2));
- andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
- // Set dirty mark for region.
- bts(Operand(object, Page::kDirtyFlagOffset), addr);
+ // Load store buffer top.
+ LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ // Store pointer to buffer.
+ movq(Operand(scratch, 0), addr);
+ // Increment buffer top.
+ addq(scratch, Immediate(kPointerSize));
+ // Write back new top of buffer.
+ StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ // Call stub on end of buffer.
+ Label done;
+ // Check for end of buffer.
+ testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kReturnAtEnd) {
+ Label buffer_overflowed;
+ j(not_equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ j(equal, &done, Label::kNear);
+ }
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(save_fp);
+ CallStub(&store_buffer_overflow);
+ if (and_then == kReturnAtEnd) {
+ ret(0);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ bind(&done);
+ }
}
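RememberedSetHelper above replaces the old page-region dirty marking with an append to the store buffer. A host-side sketch of what the generated code does, with an illustrative capacity and an explicit full check standing in for the testq against StoreBuffer::kStoreBufferOverflowBit:

#include <cstddef>

const size_t kStoreBufferCapacity = 1024;  // illustrative, not V8's size

struct StoreBufferModel {
  void* slots[kStoreBufferCapacity];
  size_t top;  // index of the next free slot (the "store buffer top" root)
};

// Records the address of a slot that was just written to. Returns true when
// the buffer has filled up and must be drained by the runtime, which is the
// job StoreBufferOverflowStub performs in the assembly above. V8 instead
// tests one bit of the top pointer, which works because of how the buffer is
// aligned; the explicit compare below is the portable equivalent.
bool RecordWrittenSlot(StoreBufferModel* buffer, void* slot_address) {
  buffer->slots[buffer->top++] = slot_address;
  return buffer->top == kStoreBufferCapacity;
}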
@@ -225,7 +245,7 @@
Register scratch,
Condition cc,
Label* branch,
- Label::Distance near_jump) {
+ Label::Distance distance) {
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
@@ -240,7 +260,7 @@
}
movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
@@ -252,35 +272,88 @@
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
}
}
-void MacroAssembler::RecordWrite(Register object,
- int offset,
- Register value,
- Register index) {
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+ ASSERT(!value.is(rsi) && !dst.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
+ // catch stores of Smis.
Label done;
- JumpIfSmi(value, &done);
- RecordWriteNonSmi(object, offset, value, index);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ lea(dst, FieldOperand(object, offset));
+ if (emit_debug_code()) {
+ Label ok;
+ testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
bind(&done);
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors. This clobbering repeats the
- // clobbering done inside RecordWriteNonSmi but it's necessary to
- // avoid having the fast case for smis leave the registers
- // unchanged.
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
+}
+
+
+void MacroAssembler::RecordWriteArray(Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Array access: calculate the destination address. Index is not a smi.
+ Register dst = index;
+ lea(dst, Operand(object, index, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
@@ -289,90 +362,72 @@
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value) {
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+ ASSERT(!value.is(rsi) && !address.is(rsi));
+
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ if (emit_debug_code()) {
+ AbortIfSmi(object);
+ }
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ if (FLAG_debug_code) {
+ Label ok;
+ cmpq(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
- JumpIfSmi(value, &done);
- InNewSpace(object, value, equal, &done);
+ if (smi_check == INLINE_SMI_CHECK) {
+ // Skip barrier if writing a smi.
+ JumpIfSmi(value, &done);
+ }
- RecordWriteHelper(object, address, value);
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
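The rewritten RecordWrite above no longer marks page regions dirty; it filters out cases that cannot need a barrier and otherwise calls RecordWriteStub. A small model of that filtering logic, where the two booleans stand in for the MemoryChunk page-flag tests done by CheckPageFlag:

struct WriteBarrierInputs {
  bool value_is_smi;                    // JumpIfSmi fast-out
  bool value_page_pointers_to_here;     // kPointersToHereAreInterestingMask
  bool object_page_pointers_from_here;  // kPointersFromHereAreInterestingMask
};

// Returns true when the slow path (RecordWriteStub) has to run. The smi test
// is only emitted when the caller asked for INLINE_SMI_CHECK.
bool NeedsRecordWriteStub(const WriteBarrierInputs& in) {
  if (in.value_is_smi) return false;
  if (!in.value_page_pointers_to_here) return false;
  if (!in.object_page_pointers_from_here) return false;
  return true;
}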
-void MacroAssembler::RecordWriteNonSmi(Register object,
- int offset,
- Register scratch,
- Register index) {
- Label done;
-
- if (emit_debug_code()) {
- Label okay;
- JumpIfNotSmi(object, &okay, Label::kNear);
- Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
- bind(&okay);
-
- if (offset == 0) {
- // index must be int32.
- Register tmp = index.is(rax) ? rbx : rax;
- push(tmp);
- movl(tmp, index);
- cmpq(tmp, index);
- Check(equal, "Index register for RecordWrite must be untagged int32.");
- pop(tmp);
- }
- }
-
- // Test that the object address is not in the new space. We cannot
- // update page dirty marks for new space pages.
- InNewSpace(object, scratch, equal, &done);
-
- // The offset is relative to a tagged or untagged HeapObject pointer,
- // so either offset or offset + kHeapObjectTag must be a
- // multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize) ||
- IsAligned(offset + kHeapObjectTag, kPointerSize));
-
- Register dst = index;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric.
- lea(dst, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- }
- RecordWriteHelper(object, dst, scratch);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
-}
-
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (emit_debug_code()) Check(cc, msg);
}
@@ -400,7 +455,7 @@
Label L;
j(cc, &L, Label::kNear);
Abort(msg);
- // will not return here
+ // Control will not return here.
bind(&L);
}
@@ -440,7 +495,7 @@
// from the real pointer as a smi.
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
+ // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
if (msg != NULL) {
@@ -448,9 +503,6 @@
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
@@ -458,52 +510,44 @@
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
RelocInfo::NONE);
push(kScratchRegister);
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
+ // Control will not return here.
int3();
}
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -540,18 +584,11 @@
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1);
- ces.SaveDoubles();
+ CEntryStub ces(1, kSaveFPRegs);
CallStub(&ces);
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
// If the expected number of arguments of the runtime function is
@@ -573,26 +610,6 @@
}
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return HEAP->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- return TryCallStub(&ces);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
Set(rax, num_arguments);
@@ -622,24 +639,6 @@
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- return TryJumpToExternalReference(ext, result_size);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -649,15 +648,6 @@
}
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
@@ -680,8 +670,8 @@
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ int stack_space) {
Label empty_result;
Label prologue;
Label promote_scheduled_exception;
@@ -711,8 +701,7 @@
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
// Call the api function!
- movq(rax,
- reinterpret_cast<int64_t>(function->address()),
+ movq(rax, reinterpret_cast<int64_t>(function_address),
RelocInfo::RUNTIME_ENTRY);
call(rax);
@@ -744,11 +733,7 @@
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
- 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
bind(&empty_result);
// It was zero; the result is undefined.
@@ -769,8 +754,6 @@
call(rax);
movq(rax, prev_limit_reg);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -783,20 +766,11 @@
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext, int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- return TryTailCallStub(&ces);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
@@ -825,6 +799,64 @@
}
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const Register saved_regs[] = {
+ REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
+ REG(r9), REG(r10), REG(r11)
+};
+
+#undef REG
+
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ push(reg);
+ }
+ }
+ // r12 to r15 are callee-saved on all platforms.
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(Operand(rsp, i * kDoubleSize), reg);
+ }
+ }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(reg, Operand(rsp, i * kDoubleSize));
+ }
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ }
+ for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ pop(reg);
+ }
+ }
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -2089,7 +2121,7 @@
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ascii strings.
+ // Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2135,7 +2167,7 @@
movq(scratch1, first_object_instance_type);
movq(scratch2, second_object_instance_type);
- // Check that both are flat ascii strings.
+ // Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2213,6 +2245,43 @@
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(result, Operand(result, 0));
+ } else {
+ Move(result, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ push(kScratchRegister);
+ } else {
+ Push(object);
+ }
+}
+
+
+void MacroAssembler::LoadGlobalCell(Register dst,
+ Handle<JSGlobalPropertyCell> cell) {
+ if (dst.is(rax)) {
+ load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ } else {
+ movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(dst, Operand(dst, 0));
+ }
+}
+
+
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@@ -2236,6 +2305,13 @@
}
+void MacroAssembler::TestBit(const Operand& src, int bits) {
+ int byte_offset = bits / kBitsPerByte;
+ int bit_in_byte = bits & (kBitsPerByte - 1);
+ testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+}
+
+
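TestBit above turns a flat bit index into a byte load plus a one-byte mask. The same arithmetic in plain C++, with a tiny self-check; the layout is generic and not tied to any particular V8 field:

#include <cassert>
#include <cstdint>

const int kBitsPerByte = 8;

bool TestBit(const uint8_t* base, int bit_index) {
  int byte_offset = bit_index / kBitsPerByte;        // which byte to load
  int bit_in_byte = bit_index & (kBitsPerByte - 1);  // which bit to mask
  return (base[byte_offset] & (1 << bit_in_byte)) != 0;
}

int main() {
  uint8_t flags[4] = {0, 0, 0x10, 0};  // bit 20 set: byte 2, bit 4
  assert(TestBit(flags, 20));
  assert(!TestBit(flags, 19));
  return 0;
}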
void MacroAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -2349,7 +2425,8 @@
// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+const int
+MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
0,
1,
2,
@@ -2384,146 +2461,146 @@
}
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- // The pc (return address) is already on TOS. This code pushes state,
- // frame pointer, context, and current handler.
- if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- push(Immediate(StackHandler::TRY_CATCH));
- } else {
- push(Immediate(StackHandler::TRY_FINALLY));
- }
- push(rbp);
- push(rsi);
- } else {
- ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for rbp. We expect the code throwing an exception to check rbp
- // before dereferencing it to restore the context.
- push(Immediate(StackHandler::ENTRY));
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First push the frame pointer and context.
+ if (kind == StackHandler::JS_ENTRY) {
+ // The frame pointer does not point to a JS frame so we save NULL for
+ // rbp. We expect the code throwing an exception to check rbp before
+ // dereferencing it to restore the context.
push(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
+ } else {
+ push(rbp);
+ push(rsi);
}
- // Save the current handler.
- Operand handler_operand =
- ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
- push(handler_operand);
- // Link this handler.
- movq(handler_operand, rsp);
+
+ // Push the state and the code object.
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+ push(Immediate(state));
+ Push(CodeObject());
+
+ // Link the current handler as the next handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ push(ExternalOperand(handler_address));
+ // Set this new handler as the current one.
+ movq(ExternalOperand(handler_address), rsp);
}
void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- // Unlink this handler.
- Operand handler_operand =
- ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
- pop(handler_operand);
- // Remove the remaining fields.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ pop(ExternalOperand(handler_address));
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // rax = exception, rdi = code object, rdx = state.
+ movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ shr(rdx, Immediate(StackHandler::kKindWidth));
+ movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+ SmiToInteger64(rdx, rdx);
+ lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ jmp(rdi);
+}
+
+
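PushTryHandler now packs the handler kind and the handler-table index into one state word, and JumpToHandlerEntry unpacks it again, shifting away the kind bits to index the code object's handler table. A sketch of that encoding with an illustrative width rather than V8's exact BitField layout:

#include <cassert>

const int kKindWidth = 3;                       // illustrative width
const unsigned kKindMask = (1u << kKindWidth) - 1;

unsigned EncodeHandlerState(unsigned kind, unsigned handler_index) {
  return (handler_index << kKindWidth) | kind;  // kind lives in the low bits
}

unsigned HandlerKind(unsigned state) { return state & kKindMask; }
unsigned HandlerIndex(unsigned state) { return state >> kKindWidth; }

int main() {
  unsigned state = EncodeHandlerState(/* kind */ 2, /* handler_index */ 17);
  assert(HandlerKind(state) == 2);
  assert(HandlerIndex(state) == 17);
  return 0;
}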
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // Keep thrown value in rax.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in rax.
if (!value.is(rax)) {
movq(rax, value);
}
-
+ // Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Operand handler_operand = ExternalOperand(handler_address);
- movq(rsp, handler_operand);
- // get next in chain
- pop(handler_operand);
+ movq(rsp, ExternalOperand(handler_address));
+ // Restore the next handler.
+ pop(ExternalOperand(handler_address));
+
+ // Remove the code object and state, compute the handler address in rdi.
+ pop(rdi); // Code object.
+ pop(rdx); // Offset and state.
+
+ // Restore the context and frame pointer.
pop(rsi); // Context.
pop(rbp); // Frame pointer.
- pop(rdx); // State.
// If the handler is a JS frame, restore the context to the frame.
- // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
- // of them.
+ // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
+ // rbp or rsi.
Label skip;
- cmpq(rdx, Immediate(StackHandler::ENTRY));
- j(equal, &skip, Label::kNear);
+ testq(rsi, rsi);
+ j(zero, &skip, Label::kNear);
movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
- ret(0);
+ JumpToHandlerEntry();
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // Keep thrown value in rax.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in rax.
if (!value.is(rax)) {
movq(rax, value);
}
- // Fetch top stack handler.
+ // Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
Load(rsp, handler_address);
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done, Label::kNear);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- movq(rsp, Operand(rsp, kNextOffset));
- jmp(&loop);
- bind(&done);
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
- // Set the top handler address to next handler past the current ENTRY handler.
- Operand handler_operand = ExternalOperand(handler_address);
- pop(handler_operand);
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ testl(Operand(rsp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::KindField::kMask));
+ j(not_zero, &fetch_next);
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
- Set(rax, static_cast<int64_t>(false));
- Store(external_caught, rax);
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(ExternalOperand(handler_address));
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
- movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- Store(pending_exception, rax);
- }
+ // Remove the code object and state, compute the handler address in rdi.
+ pop(rdi); // Code object.
+ pop(rdx); // Offset and state.
- // Discard the context saved in the handler and clear the context pointer.
- pop(rdx);
- Set(rsi, 0);
+ // Clear the context pointer and frame pointer (0 was saved in the handler).
+ pop(rsi);
+ pop(rbp);
- pop(rbp); // Restore frame pointer.
- pop(rdx); // Discard state.
-
- ret(0);
+ JumpToHandlerEntry();
}
@@ -2567,22 +2644,133 @@
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastElementValue));
j(above, fail, distance);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ j(below_equal, fail, distance);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastElementValue));
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register maybe_number,
+ Register elements,
+ Register index,
+ XMMRegister xmm_scratch,
+ Label* fail) {
+ Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+ JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+ CheckMap(maybe_number,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ cmpl(FieldOperand(maybe_number, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ j(greater_equal, &maybe_nan, Label::kNear);
+
+ bind(&not_nan);
+ movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ xmm_scratch);
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ j(greater, &is_nan, Label::kNear);
+ cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+ j(zero, &not_nan);
+ bind(&is_nan);
+ // Convert all NaNs to the same canonical NaN value when they are stored in
+ // the double array.
+ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ movq(xmm_scratch, kScratchRegister);
+ jmp(&have_double_value, Label::kNear);
+
+ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ // Preserve original value.
+ SmiToInteger32(kScratchRegister, maybe_number);
+ cvtlsi2sd(xmm_scratch, kScratchRegister);
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ xmm_scratch);
+ bind(&done);
+}
+
+
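StoreNumberToDoubleElements canonicalizes NaNs before they reach a FixedDoubleArray so that no stored value can collide with the hole's NaN payload. The host-side equivalent is tiny; the canonical value here is whatever quiet NaN the platform provides, standing in for canonical_not_the_hole_nan_as_double:

#include <cmath>
#include <limits>

double CanonicalizeForDoubleArray(double value) {
  if (std::isnan(value)) {
    // All NaN payloads collapse to one canonical pattern before storing.
    return std::numeric_limits<double>::quiet_NaN();
  }
  return value;  // ordinary numbers and infinities are stored unchanged
}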
+void MacroAssembler::CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_double_map));
+ }
+ }
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type) {
+ SmiCheckType smi_check_type,
+ CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+
+ Label success;
+ CompareMap(obj, map, &success, mode);
j(not_equal, fail);
+ bind(&success);
}
@@ -2672,6 +2860,14 @@
}
+void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
+ ASSERT(!int32_register.is(kScratchRegister));
+ movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+ cmpq(kScratchRegister, int32_register);
+ Assert(above_equal, "32 bit value in register is not zero-extended");
+}
+
+
void MacroAssembler::AbortIfNotString(Register object) {
testb(object, Immediate(kSmiTagMask));
Assert(not_equal, "Operand is not a string");
@@ -2707,7 +2903,8 @@
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
testl(function, Immediate(kSmiTagMask));
j(zero, miss);
@@ -2716,6 +2913,17 @@
CmpObjectType(function, JS_FUNCTION_TYPE, result);
j(not_equal, miss);
+ if (miss_on_bound_function) {
+ movq(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
+ // field).
+ TestBit(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset),
+ SharedFunctionInfo::kBoundFunction);
+ j(not_zero, miss);
+ }
+
// Make sure that the function has an instance prototype.
Label non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
@@ -2787,10 +2995,10 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -2816,27 +3024,34 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
+ bool definitely_mismatches = false;
InvokePrologue(expected,
actual,
Handle<Code>::null(),
code,
&done,
+ &definitely_mismatches,
flag,
Label::kNear,
call_wrapper,
call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- jmp(code);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
+ call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
+ jmp(code);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -2847,28 +3062,35 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
+ bool definitely_mismatches = false;
Register dummy = rax;
InvokePrologue(expected,
actual,
code,
dummy,
&done,
+ &definitely_mismatches,
flag,
Label::kNear,
call_wrapper,
call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
+ Call(code, rmode);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
+ Jump(code, rmode);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -2877,6 +3099,9 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2891,34 +3116,24 @@
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- ASSERT(function->is_compiled());
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Get the function and setup the context.
- Move(rdi, Handle<JSFunction>(function));
+ LoadHeapObject(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- if (V8::UseCrankshaft()) {
- // Since Crankshaft can recompile a function, we need to load
- // the Code object every time we call the function.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
- } else {
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code,
- expected,
- actual,
- RelocInfo::CODE_TARGET,
- flag,
- call_wrapper,
- call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
@@ -2927,11 +3142,13 @@
Handle<Code> code_constant,
Register code_register,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
@@ -2947,6 +3164,7 @@
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
Set(rbx, expected.immediate());
}
}
@@ -2983,7 +3201,9 @@
SetCallKind(rcx, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
- jmp(done, near_jump);
+ if (!*definitely_mismatches) {
+ jmp(done, near_jump);
+ }
} else {
SetCallKind(rcx, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -3022,7 +3242,7 @@
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
- // Setup the frame structure on the stack.
+ // Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -3082,7 +3302,7 @@
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
- // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+ // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r15, Operand(rbp, r14, times_pointer_size, offset));
@@ -3616,7 +3836,7 @@
subq(scratch1, Immediate(kHeaderAlignment));
}
- // Allocate ascii string in new space.
+ // Allocate ASCII string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
@@ -3772,6 +3992,20 @@
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ jmp(&entry);
+ bind(&loop);
+ movq(Operand(start_offset, 0), filler);
+ addq(start_offset, Immediate(kPointerSize));
+ bind(&entry);
+ cmpq(start_offset, end_offset);
+ j(less, &loop);
+}
+
+
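InitializeFieldsWithFiller is the usual fill loop with the comparison at the loop entry, so an empty range writes nothing. In plain C++ it is simply:

#include <cstdint>

void InitializeFieldsWithFiller(uintptr_t* start, uintptr_t* end,
                                uintptr_t filler) {
  while (start < end) {  // cmpq start_offset, end_offset; j(less, &loop)
    *start++ = filler;   // movq (start_offset), filler; addq kPointerSize
  }
}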
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -3797,6 +4031,46 @@
}
}
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+ j(not_equal, no_map_match);
+
+ // Use the transitioned cached map.
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+ Register function_in, Register scratch, Register map_out) {
+ ASSERT(!function_in.is(map_out));
+ Label done;
+ movq(map_out, FieldOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ if (!FLAG_smi_only_arrays) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ map_out,
+ scratch,
+ &done);
+ }
+ bind(&done);
+}
+
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -3871,6 +4145,7 @@
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ ASSERT(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
@@ -3885,6 +4160,17 @@
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
@@ -3905,6 +4191,241 @@
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ movq(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ if (mask < (1 << kBitsPerByte)) {
+ testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+ Immediate(static_cast<uint8_t>(mask)));
+ } else {
+ testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* on_black,
+ Label::Distance on_black_distance) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ // The mask_scratch register contains a 1 at the position of the first bit
+ // and a 0 at all other positions, including the position of the second bit.
+ movq(rcx, mask_scratch);
+ // Make rcx into a mask that covers both marking bits using the operation
+ // rcx = mask | (mask << 1).
+ lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ // Note that we are using a 4-byte aligned 8-byte load.
+ and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpq(mask_scratch, rcx);
+ j(equal, on_black, on_black_distance);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+ Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance) {
+ Label is_data_object;
+ movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ j(equal, &is_data_object, Label::kNear);
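+ // Heap numbers contain no GC pointers, so they always count as data objects.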
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(kIsIndirectStringMask | kIsNotStringMask));
+ j(not_zero, not_data_object, not_data_object_distance);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ movq(bitmap_reg, addr_reg);
+ // Sign extended 32 bit immediate.
+ and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movq(rcx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shrl(rcx, Immediate(shift));
+ and_(rcx,
+ Immediate((Page::kPageAlignmentMask >> shift) &
+ ~(Bitmap::kBytesPerCell - 1)));
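+ // rcx is now the byte offset, within the page's mark bitmap, of the cell
+ // that covers addr_reg (one mark bit per pointer-sized word of the page).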
+
+ addq(bitmap_reg, rcx);
+ movq(rcx, addr_reg);
+ shrl(rcx, Immediate(kPointerSizeLog2));
+ and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ movl(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
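+ // mask_reg now has a single bit set: addr_reg's first mark bit within the cell.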
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ j(not_zero, &done, Label::kNear);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+ // Shift the mask to the second mark bit (addq doubles it). May overflow,
+ // making the check conservative.
+ addq(mask_scratch, mask_scratch);
+ testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = rcx; // Holds map while checking type.
+ Register length = rcx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ j(not_equal, &not_heap_number, Label::kNear);
+ movq(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = rcx;
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ testb(instance_type, Immediate(kExternalStringTag));
+ j(zero, &not_external, Label::kNear);
+ movq(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kAsciiStringTag == 0x04);
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ addq(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
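+ // length now holds SeqString::kHeaderSize plus the character payload,
+ // rounded up to the object alignment, i.e. the allocated size of the string.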
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+ Label next;
+ Register empty_fixed_array_value = r8;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r9;
+ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ movq(rcx, rax);
+ bind(&next);
+
+ // Check that there are no elements. Register rcx contains the
+ // current JS object we've reached through the prototype chain.
+ cmpq(empty_fixed_array_value,
+ FieldOperand(rcx, JSObject::kElementsOffset));
+ j(not_equal, call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in rbx for the subsequent
+ // prototype load.
+ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
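+ // The slot holds a smi (bit field 3) when the map has no instance descriptors.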
+ JumpIfSmi(rdx, call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (rdx). This is the case if the next enumeration
+ // index field does not contain a smi.
+ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+ JumpIfSmi(rdx, call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ cmpq(rcx, rax);
+ j(equal, &check_prototype, Label::kNear);
+ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ cmpq(rdx, empty_fixed_array_value);
+ j(not_equal, call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ bind(&check_prototype);
+ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ cmpq(rcx, null_value);
+ j(not_equal, &next);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index ff6edc5..6bb5cfe 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "assembler.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -49,18 +50,23 @@
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
-static const Register kScratchRegister = { 10 }; // r10.
-static const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
-static const Register kRootRegister = { 13 }; // r13 (callee save).
+const Register kScratchRegister = { 10 }; // r10.
+const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
+const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
-static const int kSmiConstantRegisterValue = 1;
+const int kSmiConstantRegisterValue = 1;
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
-static const int kRootRegisterBias = 128;
+const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
// Forward declaration.
class JumpTarget;
@@ -72,6 +78,7 @@
ScaleFactor scale;
};
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -134,56 +141,145 @@
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
- // ---------------------------------------------------------------------------
- // GC Support
+ // These functions do not arrange the registers in any particular order so
+ // they are not useful for calls that can cause a GC. The caller can
+ // exclude up to 3 registers that do not need to be saved and restored.
+ void PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ void PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
- // For page containing |object| mark region covering |addr| dirty.
- // RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object,
- Register addr,
- Register scratch);
+// ---------------------------------------------------------------------------
+// GC Support
- // Check if object is in new space. The condition cc can be equal or
- // not_equal. If it is equal a jump will be done if the object is on new
- // space. The register scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance near_jump = Label::kFar);
- // For page containing |object| mark region covering [object+offset]
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_equal, branch, distance);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, equal, branch, distance);
+ }
+
+ // Check if an object has the black incremental marking color. Also uses rcx!
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
+
+ // Detects conservatively whether an object is data-only, i.e. it does not
+ // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Label* object_is_white_and_not_data,
+ Label::Distance distance);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // Operand(reg, off).
+ void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into a fixed array.
+ // |array| is the array being stored into, |value| is the
+ // object being stored. |index| is the array index represented as a non-smi.
+ // All registers are clobbered by the operation. RecordWriteArray
+ // filters out smis so it does not update the write barrier if the
+ // value is a smi.
+ void RecordWriteArray(
+ Register array,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
- // object being stored. If |offset| is zero, then the |scratch|
- // register contains the array index into the elements array
- // represented as an untagged 32-bit integer. All registers are
- // clobbered by the operation. RecordWrite filters out smis so it
- // does not update the write barrier if the value is a smi.
- void RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch);
-
- // For page containing |object| mark region covering [address]
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. All registers are clobbered by the
+ // object being stored. The address and value registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update
// the write barrier if the value is a smi.
- void RecordWrite(Register object,
- Register address,
- Register value);
-
- // For page containing |object| mark region covering [object+offset] dirty.
- // The value is known to not be a smi.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as an untagged 32-bit integer.
- // All registers are clobbered by the operation.
- void RecordWriteNonSmi(Register object,
- int offset,
- Register value,
- Register scratch);
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@@ -192,15 +288,6 @@
void DebugBreak();
#endif
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -232,16 +319,16 @@
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void InitializeRootRegister() {
- ExternalReference roots_address =
- ExternalReference::roots_address(isolate());
- movq(kRootRegister, roots_address);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ movq(kRootRegister, roots_array_start);
addq(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Setup call kind marking in rcx. The method takes rcx as an
+ // Set up call kind marking in rcx. The method takes rcx as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@@ -270,7 +357,7 @@
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -639,6 +726,7 @@
void Push(Smi* smi);
void Test(const Operand& dst, Smi* source);
+
// ---------------------------------------------------------------------------
// String macros.
@@ -657,7 +745,7 @@
Label* on_not_both_flat_ascii,
Label::Distance near_jump = Label::kFar);
- // Check whether the instance type represents a flat ascii string. Jump to the
+ // Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
void JumpIfInstanceTypeIsNotSequentialAscii(
@@ -684,6 +772,9 @@
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Bit-field support.
+ void TestBit(const Operand& dst, int bit_index);
+
// Handle support
void Move(Register dst, Handle<Object> source);
void Move(const Operand& dst, Handle<Object> source);
@@ -693,6 +784,22 @@
void Cmp(const Operand& dst, Smi* src);
void Push(Handle<Object> source);
+ // Load a heap object and handle the case of new-space objects by
+ // indirecting via a global cell.
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Move(result, object);
+ }
+ }
+
+ // Load a global cell into a register.
+ void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
@@ -760,13 +867,46 @@
Label* fail,
Label::Distance distance = Label::kFar);
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by index in
+ // the FastDoubleElements array elements, otherwise jump to fail. Note that
+ // index must not be smi-tagged.
+ void StoreNumberToDoubleElements(Register maybe_number,
+ Register elements,
+ Register index,
+ XMMRegister xmm_scratch,
+ Label* fail);
+
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
+ // result of map compare. If multiple map compares are required, the compare
+ // sequence branches to early_success.
+ void CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
+ SmiCheckType smi_check_type,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
@@ -809,6 +949,10 @@
void AbortIfNotSmi(Register object);
void AbortIfNotSmi(const Operand& object);
+ // Abort execution if a 64 bit register containing a 32 bit payload does not
+ // have zeros in the top 32 bits.
+ void AbortIfNotZeroExtended(Register reg);
+
// Abort execution if argument is a string. Used in debug code.
void AbortIfNotString(Register object);
@@ -820,9 +964,8 @@
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain. The return
- // address must be pushed before calling this helper.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+ // Push a new try handler and link it into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
@@ -832,7 +975,7 @@
void Throw(Register value);
// Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@@ -966,7 +1109,8 @@
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
// Generates code for reporting that an illegal operation has
// occurred.
@@ -981,6 +1125,22 @@
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the global context if the map in register
+ // map_in_out is the cached Array map in the global context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the initial map for new Arrays from a JSFunction.
+ void LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out);
+
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -994,19 +1154,9 @@
// Call a code stub.
void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -1016,19 +1166,9 @@
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
@@ -1040,38 +1180,26 @@
int num_arguments,
int result_size);
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext, int result_size);
- // Jump to a runtime routine.
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
- int result_size);
-
- // Prepares stack to put arguments (aligns and so on).
- // WIN64 calling convention requires to put the pointer to the return value
- // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
- // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+ // Prepares stack to put arguments (aligns and so on). WIN64 calling
+ // convention requires to put the pointer to the return value slot into
+ // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
+ // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
void PrepareCallApiFunction(int arg_stack_space);
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers r14, r15, rbx and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Clobbers r14, r15, rbx and
+ // caller-save registers. Restores context. On return removes
+ // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Address function_address, int stack_space);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -1120,6 +1248,13 @@
int min_length = 0,
Register scratch = kScratchRegister);
+ // Initialize fields with filler values. Fields starting at |start_offset|,
+ // up to but not including |end_offset|, are overwritten with the value in
+ // |filler|. At the end of the loop, |start_offset| takes the value of
+ // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1152,20 +1287,33 @@
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Expects object in rax and returns map with validated enum cache
+ // in rax. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Register null_value,
+ Label* call_runtime);
+
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
- static int kSafepointPushRegisterIndices[Register::kNumRegisters];
+ static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 11;
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
bool root_array_available_;
// Returns a register holding the smi value. The register MUST NOT be
@@ -1184,15 +1332,12 @@
Handle<Code> code_constant,
Register code_register,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump = Label::kFar,
const CallWrapper& call_wrapper = NullCallWrapper(),
CallKind call_kind = CALL_AS_METHOD);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void EnterExitFramePrologue(bool save_rax);
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1219,6 +1364,24 @@
Register scratch,
bool gc_allowed);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance distance = Label::kFar);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // register holds the position of the first bit. Uses rcx as scratch and
+ // leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
@@ -1256,32 +1419,32 @@
// Static helper functions.
// Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
// Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
+inline Operand FieldOperand(Register object,
+ Register index,
+ ScaleFactor scale,
+ int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
return ContextOperand(rsi, Context::GLOBAL_INDEX);
}
// Provides access to exit frame stack space (not GCed).
-static inline Operand StackSpaceOperand(int index) {
+inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
const int kShaddowSpace = 4;
return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a782bd7..837c254 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@
void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -226,7 +226,7 @@
bool check_end_of_string) {
#ifdef DEBUG
// If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ascii character.
+ // match contains a non-ASCII character.
if (mode_ == ASCII) {
ASSERT(String::IsAscii(str.start(), str.length()));
}
@@ -431,9 +431,14 @@
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address());
#endif
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
- __ CallCFunction(compare, num_arguments);
+
+ { // NOLINT: Can't find a way to open this scope without confusing the
+ // linter.
+ AllowExternalCallThatCantCauseGC scope(&masm_);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+ __ CallCFunction(compare, num_arguments);
+ }
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -559,7 +564,7 @@
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(rax, Operand(current_character(), -minus));
__ and_(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
@@ -706,7 +711,12 @@
// registers we need.
// Entry code:
__ bind(&entry_label_);
- // Start new stack frame.
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(&masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
__ push(rbp);
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
@@ -1182,7 +1192,7 @@
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt();
+ MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid
intptr_t delta = code_handle->address() - re_code->address();
@@ -1238,6 +1248,11 @@
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 76d2555..f07f6b6 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,32 +43,61 @@
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
+ Register receiver,
Register name,
+ // The offset is scaled by 4, based on
+ // kHeapObjectTagSize, which is two bits
Register offset) {
- ASSERT_EQ(8, kPointerSize);
- ASSERT_EQ(16, sizeof(StubCache::Entry));
+ // We need to scale up the pointer by 2 because the offset is scaled by less
+ // than the pointer size.
+ ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
+ ScaleFactor scale_factor = times_2;
+
+ ASSERT_EQ(24, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
Label miss;
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
+
__ LoadAddress(kScratchRegister, key_offset);
+
// Check that the key in the entry matches the name.
// Multiply entry offset by 24 to get the entry address. Since the
// offset register already holds the entry offset times twelve (times four
// from the hash masking, times three above), multiply by a further two.
- __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+ __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
__ j(not_equal, &miss);
- // Get the code entry from the cache.
- // Use key_offset + kPointerSize, rather than loading value_offset.
+
+ // Get the map entry from the cache.
+ // Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movq(kScratchRegister,
- Operand(kScratchRegister, offset, times_4, kPointerSize));
+ Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
+ __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Get the code entry from the cache.
+ __ LoadAddress(kScratchRegister, value_offset);
+ __ movq(kScratchRegister,
+ Operand(kScratchRegister, offset, scale_factor, 0));
+
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
// Jump to the first instruction in the code stub.
__ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
@@ -82,13 +111,12 @@
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register r0,
+ Register r1) {
ASSERT(name->IsSymbol());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
@@ -118,19 +146,14 @@
__ j(not_equal, miss_label);
Label done;
- MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- if (result->IsFailure()) return result;
-
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
-
- return result;
}
@@ -140,14 +163,16 @@
Register name,
Register scratch,
Register extra,
- Register extra2) {
+ Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 16.
- ASSERT(sizeof(Entry) == 16);
+ USE(extra3); // The register extra3 is not used on the X64 platform.
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 24.
+ ASSERT(sizeof(Entry) == 24);
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -159,6 +184,10 @@
// Check scratch register is valid, extra and extra2 are unused.
ASSERT(!scratch.is(no_reg));
ASSERT(extra2.is(no_reg));
+ ASSERT(extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
@@ -168,10 +197,12 @@
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
+ // We mask out the last two bits because they are not part of the hash and
+ // they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -183,11 +214,12 @@
__ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
@@ -211,7 +243,10 @@
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ Move(prototype, isolate->global());
@@ -219,8 +254,8 @@
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -312,8 +347,10 @@
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -333,11 +370,11 @@
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
- __ Move(kScratchRegister, Handle<Object>(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ __ Move(kScratchRegister, interceptor);
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
@@ -345,11 +382,12 @@
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -403,9 +441,9 @@
// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : object passing the type check
@@ -420,29 +458,25 @@
// -- rsp[(argc + 4) * 8] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ Move(rdi, Handle<JSFunction>(function));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Pass the additional arguments.
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(rcx, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
+ __ Move(Operand(rsp, 3 * kPointerSize), call_data);
}
// Prepare arguments.
__ lea(rbx, Operand(rsp, 3 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
#ifdef _WIN64
// Win64 uses first register--rcx--for returned value.
Register arguments_arg = rdx;
@@ -465,12 +499,11 @@
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
+
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ CallApiFunctionAndReturn(function_address,
+ argc + kFastApiCallArguments + 1);
}
@@ -485,16 +518,16 @@
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -502,45 +535,27 @@
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -549,16 +564,14 @@
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
Counters* counters = masm->isolate()->counters();
@@ -574,9 +587,9 @@
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -589,10 +602,11 @@
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -603,10 +617,7 @@
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -627,33 +638,27 @@
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
}
-
- return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -662,27 +667,30 @@
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ // Leave the internal frame.
+ }
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(not_equal, interceptor_succeeded);
@@ -697,43 +705,33 @@
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
Label* miss_label) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, miss_label);
+ __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+ miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -745,12 +743,12 @@
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch); // Return address.
__ push(receiver_reg);
- __ Push(Handle<Map>(transition));
+ __ Push(transition);
__ push(rax);
__ push(scratch);
__ TailCallExternalReference(
@@ -761,11 +759,10 @@
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(transition));
+ __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -781,7 +778,8 @@
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -792,7 +790,8 @@
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ __ RecordWriteField(
+ scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
}
// Return the value (register rax).
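The hunks above replace the old RecordWrite calls in GenerateStoreField with RecordWriteField plus the kDontSaveFPRegs argument used by the incremental-marking-era barriers. As a rough illustration of what such a barrier has to track, here is a minimal C++ sketch with invented types (Obj, Heap::WriteField); it is not V8's implementation, only the generational and incremental bookkeeping idea.

#include <cstdio>
#include <unordered_set>
#include <vector>

// Toy object model: a mark bit for incremental marking and a new-space flag.
struct Obj {
  bool in_new_space = false;
  bool marked_black = false;
  std::vector<Obj*> fields;
};

struct Heap {
  std::unordered_set<Obj**> remembered_slots;  // old-to-new pointer slots
  std::unordered_set<Obj*> marking_worklist;   // objects the marker must revisit

  // Store a pointer into a field and record whatever the collector needs.
  void WriteField(Obj* host, Obj** slot, Obj* value) {
    *slot = value;
    if (value == nullptr) return;
    if (!host->in_new_space && value->in_new_space) {
      remembered_slots.insert(slot);           // generational barrier
    }
    if (host->marked_black && !value->marked_black) {
      marking_worklist.insert(value);          // incremental-marking barrier
    }
  }
};

int main() {
  Heap heap;
  Obj host;
  host.marked_black = true;
  Obj value;
  value.in_new_space = true;
  host.fields.resize(1);
  heap.WriteField(&host, &host.fields[0], &value);
  std::printf("remembered slots: %zu, marking worklist: %zu\n",
              heap.remembered_slots.size(), heap.marking_worklist.size());
  return 0;
}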
@@ -803,37 +802,53 @@
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, Handle<Object>(cell));
+ __ Move(scratch, cell);
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
masm->isolate()->factory()->the_hole_value());
__ j(not_equal, miss);
- return cell;
}
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ if (current->IsGlobalObject()) {
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
+ }
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+ }
+}
+
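GenerateCheckPropertyCells above compiles a check for every global object between the receiver and the holder. A hedged sketch of the same walk in plain C++ follows; ProtoObject and CheckSkippedGlobals are invented stand-ins, not V8 types, and a real stub emits a comparison against the property cell's hole value rather than consulting a std::map.

#include <map>
#include <string>

struct ProtoObject {
  bool is_global = false;
  std::map<std::string, int> own_properties;  // stand-in for property cells
  ProtoObject* prototype = nullptr;
};

// True if none of the global objects skipped between |object| and |holder|
// has acquired the property since the stub was compiled.
bool CheckSkippedGlobals(const ProtoObject* object, const ProtoObject* holder,
                         const std::string& name) {
  for (const ProtoObject* current = object; current != holder;
       current = current->prototype) {
    if (current->is_global && current->own_properties.count(name) != 0) {
      return false;  // the compiled fast path would have to miss
    }
  }
  return true;
}

int main() {
  ProtoObject holder;
  ProtoObject global;
  global.is_global = true;
  global.prototype = &holder;
  return CheckSkippedGlobals(&global, &holder, "x") ? 0 : 1;
}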
#undef __
#define __ ACCESS_MASM((masm()))
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between holder and object registers.
@@ -853,80 +868,56 @@
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* lookup_result = heap()->LookupSymbol(name);
- if (lookup_result->IsFailure()) {
- set_failure(Failure::cast(lookup_result));
- return reg;
- } else {
- name = String::cast(lookup_result->ToObjectUnchecked());
- }
+ name = factory()->LookupSymbol(name);
}
- ASSERT(current->property_dictionary()->FindEntry(name) ==
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
+ reg = holder_reg; // From now on the object will be in holder_reg.
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ Cmp(scratch1, Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ Handle<Map> current_map(current->map());
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ CheckMap(reg, Handle<Map>(current_map),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- } else {
- // Check the map of the current object.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
}
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ Move(reg, Handle<JSObject>(prototype));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (in_new_space) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Move(reg, prototype);
+ }
}
if (save_at_depth == depth) {
@@ -936,62 +927,46 @@
// Go to the next object in the prototype chain.
current = prototype;
}
-
- // Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
- __ j(not_equal, miss);
+ ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(current == holder);
+ // Check the holder map.
+ __ CheckMap(reg, Handle<Map>(holder->map()),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ // Perform security check for access to the global object.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
if (current->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(current),
- name,
- scratch1,
- miss);
- if (cell->IsFailure()) {
- set_failure(Failure::cast(cell));
- return reg;
- }
- }
- current = JSObject::cast(current->GetPrototype());
- }
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
// Return the register containing the holder.
return reg;
}
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check the prototype chain.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -999,25 +974,22 @@
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch2.is(reg));
@@ -1025,11 +997,11 @@
__ push(receiver); // receiver
__ push(reg); // holder
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch1, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch1, callback);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
- __ Push(Handle<Object>(callback_handle->data()));
+ __ Push(Handle<Object>(callback->data()));
}
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
@@ -1048,11 +1020,7 @@
__ movq(name_arg, rsp);
__ push(scratch2); // Restore return address.
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
- // 3 elements array for v8::Agruments::values_ and handler for name.
+ // 3 elements array for v8::Arguments::values_ and handler for name.
const int kStackSpace = 4;
// Allocate v8::AccessorInfo in non-GCed stack space.
@@ -1068,45 +1036,42 @@
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ CallApiFunctionAndReturn(getter_address, kStackSpace);
}
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<JSFunction> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ Move(rax, Handle<Object>(value));
+ __ LoadHeapObject(rax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1118,13 +1083,13 @@
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1139,47 +1104,49 @@
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ // Leave the internal frame.
}
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1191,15 +1158,15 @@
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), rax, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
@@ -1208,7 +1175,7 @@
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
- __ Move(holder_reg, Handle<AccessorInfo>(callback));
+ __ Move(holder_reg, callback);
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(holder_reg);
__ push(name_reg);
@@ -1237,17 +1204,17 @@
}
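The interceptor hunk above swaps the explicit EnterInternalFrame/LeaveInternalFrame pair for a scoped FrameScope block. The following toy RAII class (ScopedFrame, invented for this sketch) shows only the pairing idea; it does not model V8's actual frame layout or code generation.

#include <cstdio>

// Invented stand-in for a frame scope; it only demonstrates the pairing.
class ScopedFrame {
 public:
  ScopedFrame() { std::puts("enter internal frame"); }
  ~ScopedFrame() {
    if (!left_) std::puts("leave internal frame (fall-through path)");
  }
  // Rough analogue of the explicit leave emitted before an early return.
  void LeaveEarly() {
    std::puts("leave internal frame (early-return path)");
    left_ = true;
  }

 private:
  bool left_ = false;
};

int main() {
  {
    ScopedFrame frame;
    std::puts("  interceptor produced a value, returning early");
    frame.LeaveEarly();
  }
  {
    ScopedFrame frame;
    std::puts("  interceptor failed, falling through");
  }  // destructor emits the leave for this path
  return 0;
}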
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, Handle<String>(name));
+ __ Cmp(rcx, name);
__ j(not_equal, miss);
}
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1257,27 +1224,23 @@
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ JumpIfSmi(rdx, miss);
- }
// Check that the maps haven't changed.
+ __ JumpIfSmi(rdx, miss);
CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rdi, cell);
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1290,30 +1253,26 @@
// Check the shared function info. Make sure it hasn't changed.
__ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
__ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- __ j(not_equal, miss);
} else {
- __ Cmp(rdi, Handle<JSFunction>(function));
- __ j(not_equal, miss);
+ __ Cmp(rdi, function);
}
+ __ j(not_equal, miss);
}
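GenerateLoadFunctionFromCell compares the shared function info rather than the function itself when the function lives in new space, because a new-space pointer cannot be embedded in generated code. A small illustrative model follows; SharedInfo, FunctionObj and CellStillMatches are invented names, not V8's.

#include <cassert>

struct SharedInfo { int id; };
struct FunctionObj {
  const SharedInfo* shared;
  bool in_new_space;
};

// Decide how a compiled stub can check "is the cell still holding the
// function we specialized for?".
bool CellStillMatches(const FunctionObj* cell_value,
                      const FunctionObj* expected) {
  if (expected->in_new_space) {
    // A new-space pointer cannot be baked into code, so compare the stable
    // shared info instead; other closures over the same source also pass.
    return cell_value->shared == expected->shared;
  }
  return cell_value == expected;  // old space: direct identity check is fine
}

int main() {
  SharedInfo shared{1};
  FunctionObj original{&shared, /*in_new_space=*/true};
  FunctionObj other_closure{&shared, /*in_new_space=*/true};
  assert(CellStillMatches(&other_closure, &original));
  return 0;
}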
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
+ extra_state_);
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
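GenerateMissBranch is typical of the whole rewrite: the MaybeObject return value and the caller-side failure checks disappear once the stub cache hands back a handle. The toy comparison below uses invented types (MaybeResult and a shared_ptr stand-in) purely to contrast the two calling conventions; it is not V8's handle machinery.

#include <memory>
#include <string>

// "Before": allocation failure is encoded in the return value and every
// caller has to check and propagate it.
struct MaybeResult {
  bool failed;
  std::string code;
};
MaybeResult OldStyleComputeStub() { return {false, "stub"}; }
MaybeResult OldStyleCaller() {
  MaybeResult r = OldStyleComputeStub();
  if (r.failed) return r;  // the check the diff keeps deleting
  return {false, r.code + "+jump"};
}

// "After": the callee may allocate (and let GC run) freely, so the caller
// just uses the returned value.
std::shared_ptr<std::string> NewStyleComputeStub() {
  return std::make_shared<std::string>("stub");
}
std::shared_ptr<std::string> NewStyleCaller() {
  return std::make_shared<std::string>(*NewStyleComputeStub() + "+jump");
}

int main() {
  MaybeResult old_result = OldStyleCaller();
  std::shared_ptr<std::string> new_result = NewStyleCaller();
  return (!old_result.failed && new_result) ? 0 : 1;
}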
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1353,7 +1312,7 @@
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -1361,19 +1320,19 @@
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1383,10 +1342,9 @@
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -1396,14 +1354,8 @@
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
- CheckPrototypes(JSObject::cast(object),
- rdx,
- holder,
- rbx,
- rax,
- rdi,
- name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1412,53 +1364,85 @@
} else {
Label call_builtin;
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
-
if (argc == 1) { // Otherwise fall through to call builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier;
+
+ // Get the elements array of the object.
+ __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+ factory()->fixed_array_map());
+ __ j(not_equal, &call_builtin);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ addl(rax, Immediate(argc));
- // Get the element's length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ // Get the elements' length into rcx.
+ __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements);
+ // Check if value is a smi.
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ JumpIfNotSmi(rcx, &with_write_barrier);
+
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- // Push the element.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ lea(rdx, FieldOperand(rbx,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
+ // Store the value.
+ __ movq(FieldOperand(rdi,
+ rax,
+ times_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize),
+ rcx);
- // Check if value is a smi.
__ Integer32ToSmi(rax, rax); // Return new length as smi.
-
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- __ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
- __ InNewSpace(rbx, rcx, equal, &exit);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ RecordWriteHelper(rbx, rdx, rcx);
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ Label fast_object, not_fast_object;
+ __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
+ __ jmp(&fast_object);
+ // In case of fast smi-only, convert to fast object, otherwise bail out.
+ __ bind(&not_fast_object);
+ __ CheckFastSmiOnlyElements(rbx, &call_builtin);
+ // rdx: receiver
+ // rbx: map
+ __ movq(r9, rdi); // Back up rdi as it is going to be trashed.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ &call_builtin);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ __ movq(rdi, r9);
+ __ bind(&fast_object);
+ } else {
+ __ CheckFastObjectElements(rbx, &call_builtin);
+ }
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ // Store the value.
+ __ lea(rdx, FieldOperand(rdi,
+ rax, times_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ movq(Operand(rdx, 0), rcx);
+
+ __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1466,6 +1450,15 @@
__ jmp(&call_builtin);
}
+ __ movq(rbx, Operand(rsp, argc * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(rbx, &no_fast_elements_check);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
+ __ bind(&no_fast_elements_check);
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
@@ -1476,7 +1469,7 @@
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rbx,
+ __ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpq(rdx, rcx);
@@ -1489,28 +1482,33 @@
// We fit and could grow elements.
__ Store(new_space_allocation_top, rcx);
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
// Push the argument...
- __ movq(Operand(rdx, 0), rcx);
+ __ movq(Operand(rdx, 0), rbx);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
}
+ // We know the elements array is in new space so we don't need the
+ // remembered set, but we just pushed a value onto it so we may have to
+ // tell the incremental marker to rescan the object that we just grew. We
+ // don't need to worry about the holes because they are in old space and
+ // already marked black.
+ __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
// Restore receiver to rdx as finish sequence assumes it's here.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
+ __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
@@ -1522,19 +1520,19 @@
}
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
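The rewritten array-push fast path has to cope with smi-only element kinds: a non-smi value either forces a transition to fast object elements or falls back to the builtin, and only heap-pointer stores need the write barrier. A compact model of that decision follows; ElementsKind, PushOutcome and PushElement are illustrative names, not V8's.

#include <cstdio>

enum class ElementsKind { kFastSmiOnly, kFastObject };
enum class PushOutcome { kStoredNoBarrier, kStoredWithBarrier, kCallBuiltin };

PushOutcome PushElement(ElementsKind* kind, bool value_is_smi,
                        bool can_transition) {
  if (value_is_smi) return PushOutcome::kStoredNoBarrier;  // smis need no barrier
  if (*kind == ElementsKind::kFastSmiOnly) {
    if (!can_transition) return PushOutcome::kCallBuiltin;
    *kind = ElementsKind::kFastObject;  // smi-only -> fast-object transition
  }
  return PushOutcome::kStoredWithBarrier;  // heap pointer: barrier required
}

int main() {
  ElementsKind kind = ElementsKind::kFastSmiOnly;
  PushOutcome outcome =
      PushElement(&kind, /*value_is_smi=*/false, /*can_transition=*/true);
  std::printf("outcome=%d, kind after push=%d\n",
              static_cast<int>(outcome), static_cast<int>(kind));
  return 0;
}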
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1544,10 +1542,9 @@
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -1557,9 +1554,8 @@
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
- CheckPrototypes(JSObject::cast(object), rdx,
- holder, rbx,
- rax, rdi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1605,20 +1601,19 @@
1);
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1628,7 +1623,7 @@
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
@@ -1636,13 +1631,11 @@
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1650,12 +1643,90 @@
Context::STRING_FUNCTION_INDEX,
rax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rbx;
Register index = rdi;
+ Register result = rax;
+ __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ StubRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+ }
+
+ __ bind(&miss);
+ // Restore function name in rcx.
+ __ Move(rcx, name);
+ __ bind(&name_miss);
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(function);
+}
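CompileStringCharCodeAtCall, like the other custom call compilers in this file, now signals "no specialized stub" by returning Handle<Code>::null() instead of heap()->undefined_value(). The sketch below shows that convention in isolation; the simplified Handle template and TryCompileCustomCall are invented for the example and are not V8's API.

#include <cstdio>

// Deliberately simplified handle: empty means "no code was generated".
template <typename T>
class Handle {
 public:
  Handle() : ptr_(nullptr) {}
  explicit Handle(T* ptr) : ptr_(ptr) {}
  static Handle null() { return Handle(); }
  bool is_null() const { return ptr_ == nullptr; }
  T* operator->() const { return ptr_; }

 private:
  T* ptr_;
};

struct Code { const char* name; };

Handle<Code> TryCompileCustomCall(bool receiver_is_string, bool has_cell) {
  // Mirrors the early-outs above: a wrong receiver type or a present global
  // cell means the specialized stub does not apply.
  if (!receiver_is_string || has_cell) return Handle<Code>::null();
  static Code stub = {"charCodeAt stub"};
  return Handle<Code>(&stub);
}

int main() {
  Handle<Code> code = TryCompileCustomCall(/*receiver_is_string=*/true,
                                           /*has_cell=*/false);
  if (code.is_null()) {
    std::printf("fall back to the regular call stub\n");
  } else {
    std::printf("using %s\n", code->name);
  }
  return 0;
}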
+
+
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
+
+ const int argc = arguments().immediate();
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rax,
+ &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+
+ Register receiver = rax;
+ Register index = rdi;
Register scratch = rdx;
Register result = rax;
__ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1665,130 +1736,42 @@
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
- __ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch1 = rbx;
- Register scratch2 = rdx;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kEmptyStringRootIndex);
__ ret((argc + 1) * kPointerSize);
}
-
__ bind(&miss);
// Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
+ __ Move(rcx, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1797,25 +1780,23 @@
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ const int argc = arguments().immediate();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
__ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -1830,17 +1811,17 @@
// Convert the smi code to uint16.
__ SmiAndConstant(code, code, Smi::FromInt(0xffff));
- StringCharFromCodeGenerator char_from_code_generator(code, rax);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, rax);
+ generator.GenerateFast(masm());
__ ret(2 * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1848,29 +1829,30 @@
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// TODO(872): implement this.
- return heap()->undefined_value();
+ return Handle<Code>::null();
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1879,28 +1861,25 @@
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ const int argc = arguments().immediate();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
__ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
-
// Load the (only) argument into rax.
__ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -1957,7 +1936,7 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1965,33 +1944,31 @@
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
-
GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
@@ -2010,32 +1987,30 @@
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, depth, &miss);
// Move the return address on top of the stack.
__ movq(rax, Operand(rsp, 3 * kPointerSize));
__ movq(Operand(rsp, 0 * kPointerSize), rax);
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// rcx : function name
@@ -2048,16 +2023,14 @@
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -2074,14 +2047,13 @@
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
Counters* counters = isolate()->counters();
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(counters->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
+ rdi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2092,28 +2064,25 @@
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(rdx, &fast);
@@ -2123,18 +2092,18 @@
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2145,17 +2114,18 @@
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2163,17 +2133,16 @@
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2184,30 +2153,20 @@
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- rax,
- &miss);
- if (result->IsFailure()) return result;
+ CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
+ compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
+ &miss);
// Restore receiver.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2226,7 +2185,7 @@
// Invoke the function.
__ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -2234,19 +2193,19 @@
// Handle load cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2258,23 +2217,17 @@
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
@@ -2283,45 +2236,37 @@
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
- // Setup the context (function already in rdi).
+ // Set up the context (function already in rdi).
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1);
- ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- } else {
- Handle<Code> code(function->code());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
+
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
}
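CompileCallGlobal now always calls through the function's code-entry field so that recompilation retargets every call site without patching any of them. A minimal stand-alone model of that indirection follows; JSFunctionModel and CallSite are invented for the sketch, not V8's data layout.

#include <cstdio>

struct JSFunctionModel {
  void (*code_entry)();  // stand-in for the function's code-entry slot
};

void unoptimized_code() { std::puts("running unoptimized code"); }
void optimized_code() { std::puts("running optimized code"); }

void CallSite(JSFunctionModel* fn) {
  fn->code_entry();  // indirect call: always picks up the current code
}

int main() {
  JSFunctionModel fn = {unoptimized_code};
  CallSite(&fn);
  fn.code_entry = optimized_code;  // "recompilation" swaps one slot ...
  CallSite(&fn);                   // ... and every call site is retargeted
  return 0;
}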
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2331,12 +2276,7 @@
Label miss;
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2344,13 +2284,14 @@
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2359,13 +2300,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
+ __ CheckMap(rdx, Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2378,7 +2315,7 @@
__ pop(rbx); // remove the return address
__ push(rdx); // receiver
- __ Push(Handle<AccessorInfo>(callback)); // callback info
+ __ Push(callback); // callback info
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // restore return address
@@ -2398,8 +2335,9 @@
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2408,13 +2346,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2447,9 +2381,10 @@
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2463,17 +2398,20 @@
Handle<Map>(object->map()));
__ j(not_equal, &miss);
+ // Compute the cell operand to use.
+ __ Move(rbx, cell);
+ Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
- __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
// Store the value in the cell.
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+ __ movq(cell_operand, rax);
+ // Cells are always rescanned, so no write barrier here.
// Return the value (register rax).
Counters* counters = isolate()->counters();
@@ -2491,10 +2429,10 @@
}
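// Illustrative sketch of the fast global store above (hypothetical types, int
// values instead of tagged pointers): the cell either holds a live value or a
// hole sentinel marking a deleted property, and a store into a hole must go to
// the runtime so the global object's property dictionary can be updated.
#include <cassert>
#include <cstdio>

struct PropertyCell {
  static constexpr int kTheHole = -1;  // stand-in for the hole sentinel
  int value = kTheHole;
};

// Fast-path store; returns false when the runtime has to take over.
bool FastStoreGlobal(PropertyCell* cell, int value) {
  if (cell->value == PropertyCell::kTheHole) return false;  // deleted: miss
  // Mirrors the "cells are always rescanned" remark above: nothing beyond the
  // plain store is needed in this model.
  cell->value = value;
  return true;
}

int main() {
  PropertyCell cell{42};
  assert(FastStoreGlobal(&cell, 7));    // live cell: fast path succeeds
  cell.value = PropertyCell::kTheHole;  // property was deleted
  assert(!FastStoreGlobal(&cell, 9));   // hole: bail out to the runtime
  std::printf("ok\n");
}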
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2507,16 +2445,11 @@
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
- __ Cmp(rcx, Handle<String>(name));
+ __ Cmp(rcx, name);
__ j(not_equal, &miss);
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2525,39 +2458,38 @@
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Code* stub;
+
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2565,18 +2497,22 @@
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ JumpIfSmi(rdx, &miss);
+ __ JumpIfSmi(rdx, &miss, Label::kNear);
- Register map_reg = rbx;
- __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
+ for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
- Handle<Map> map(receiver_maps->at(current));
- __ Cmp(map_reg, map);
- __ j(equal,
- Handle<Code>(handler_ics->at(current)),
- RelocInfo::CODE_TARGET);
+ __ Cmp(rdi, receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
@@ -2584,13 +2520,13 @@
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
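// Illustrative sketch of the dispatch loop in CompileStorePolymorphic above
// (hypothetical types): each entry pairs a receiver map with a handler stub
// and an optional map to transition to; on a match the transition map is
// handed to the handler, otherwise the next entry is tried, and a total miss
// falls through to the generic stub.
#include <cstdio>
#include <vector>

struct Map { int id; };
struct Handler { const char* name; };

struct Entry {
  const Map* map;
  const Handler* handler;
  const Map* transitioned_map;  // nullptr when no transition is needed
};

const Handler* Dispatch(const Map* receiver_map,
                        const std::vector<Entry>& entries,
                        const Map** transition_out) {
  for (const Entry& entry : entries) {
    if (entry.map != receiver_map) continue;   // __ Cmp / j(not_equal, ...)
    *transition_out = entry.transitioned_map;  // __ movq(rbx, transitioned_map)
    return entry.handler;                      // tail call to the handler stub
  }
  return nullptr;  // no match: the generic miss handler takes over
}

int main() {
  Map fast{1}, smi_only{2};
  Handler store_fast{"store_fast"};
  std::vector<Entry> entries = {{&smi_only, &store_fast, &fast},  // transition
                                {&fast, &store_fast, nullptr}};   // direct
  const Map* transition = nullptr;
  const Handler* handler = Dispatch(&smi_only, entries, &transition);
  std::printf("%s, transition to map %d\n",
              handler->name, transition != nullptr ? transition->id : 0);
}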
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2609,15 +2545,8 @@
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- rdx,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2629,14 +2558,14 @@
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2653,24 +2582,19 @@
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
- rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2679,10 +2603,10 @@
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2699,32 +2623,22 @@
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rax,
- rcx,
- rdx,
- rbx,
- rdi,
- name,
- &miss);
-
+ GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2733,11 +2647,12 @@
}
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2745,18 +2660,12 @@
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual loads. In this case,
- // the receiver cannot be a smi.
- if (object != holder) {
- __ JumpIfSmi(rax, &miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(rax, &miss);
CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
// Get the value from the cell.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rbx, cell);
__ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -2782,9 +2691,9 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- rax : key
@@ -2797,7 +2706,7 @@
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
@@ -2811,34 +2720,27 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
- rcx, rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+ name, &miss);
__ bind(&miss);
-
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2847,10 +2749,11 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2862,7 +2765,7 @@
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -2876,35 +2779,27 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
+ name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2914,7 +2809,8 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2926,7 +2822,7 @@
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
@@ -2939,7 +2835,8 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2951,7 +2848,7 @@
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
@@ -2964,7 +2861,8 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2976,7 +2874,7 @@
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
@@ -2989,32 +2887,29 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3028,24 +2923,22 @@
int receiver_count = receiver_maps->length();
for (int current = 0; current < receiver_count; ++current) {
// Check map and tail call if there's a match
- Handle<Map> map(receiver_maps->at(current));
- __ Cmp(map_reg, map);
- __ j(equal,
- Handle<Code>(handler_ics->at(current)),
- RelocInfo::CODE_TARGET);
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
}
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rdi : constructor
@@ -3088,12 +2981,8 @@
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+ &generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject; now initialize the fields and add the heap tag.
// rbx: initial map
@@ -3118,7 +3007,7 @@
// r9: first in-object property of the JSObject
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
// Check if the argument assigned to the property is actually passed.
@@ -3166,10 +3055,8 @@
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code =
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
@@ -3436,6 +3323,7 @@
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3503,6 +3391,7 @@
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3634,15 +3523,19 @@
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind, finish_store, grow;
+ Label check_capacity, slow;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3650,28 +3543,45 @@
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &miss_force_generic);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(rax, &transition_elements_kind);
+ }
+
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
// Check that the key is within bounds.
if (is_js_array) {
__ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
__ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
}
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ movq(rdx, rax);
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ RecordWrite(rdi, 0, rdx, rcx);
+ __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &miss_force_generic);
+
+ __ bind(&finish_store);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ SmiToInteger32(rcx, rcx);
+ __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ } else {
+ // Do the store and update the write barrier.
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ SmiToInteger32(rcx, rcx);
+ __ lea(rcx,
+ FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ movq(Operand(rcx, 0), rax);
+ // Make sure to preserve the value in register rax.
+ __ movq(rbx, rax);
+ __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
+ }
// Done.
__ ret(0);
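// Illustrative sketch of why the FAST_SMI_ONLY_ELEMENTS branch above can skip
// RecordWrite (hypothetical tagging scheme): smis are immediate values, not
// heap pointers, so storing one can never create a reference the collector has
// to track, whereas storing a heap object records the written slot.
#include <cstdint>
#include <cstdio>
#include <unordered_set>

using Tagged = std::uintptr_t;

// Toy tagging: smis have the low bit clear, heap objects have it set.
inline bool IsSmi(Tagged value) { return (value & 1) == 0; }

// Stand-in for the write barrier's remembered set.
std::unordered_set<Tagged*> remembered_set;

void StoreElement(Tagged* slot, Tagged value) {
  *slot = value;
  // Only heap-object stores need the barrier; smi stores are barrier-free.
  if (!IsSmi(value)) remembered_set.insert(slot);
}

int main() {
  Tagged elements[4] = {0, 0, 0, 0};
  StoreElement(&elements[0], 42u << 1);                      // smi store
  StoreElement(&elements[1],
               reinterpret_cast<Tagged>(&elements[2]) | 1);  // "heap object"
  std::printf("slots recorded by the barrier: %zu\n", remembered_set.size());
}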
@@ -3681,20 +3591,93 @@
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element; anything else
+ // must be handled by the runtime. Flags are already set by the previous
+ // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+ masm->isolate()->factory()->fixed_array_map());
+ __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
+ Smi::FromInt(JSArray::kPreallocatedArrayElements));
+ __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
+ }
+
+ // Store the element at index zero.
+ __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
+
+ // Install the new backing store in the JSArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+ __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+ __ ret(0);
+
+ __ bind(&check_capacity);
+ // Check for copy-on-write (COW) elements; in general they are not handled
+ // by this stub.
+ __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &miss_force_generic);
+
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
+ Smi::FromInt(1));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
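// Illustrative sketch of the ALLOW_JSARRAY_GROWTH path above (hypothetical
// types; kPreallocatedArrayElements is an assumed small constant): only a
// store exactly one past the current length grows the array, an empty array
// gets a small preallocated backing store, and anything the fast path cannot
// handle is left to the runtime.
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr std::size_t kPreallocatedArrayElements = 4;  // assumed preallocation

struct Array {
  std::vector<int> backing;  // stand-in for the FixedArray backing store
  std::size_t length = 0;    // the JSArray length
};

// Returns false when the store must be handled by the runtime/generic stub.
bool StoreWithGrowth(Array* array, std::size_t key, int value) {
  if (key > array->length) return false;  // only grow by a single element
  if (key == array->length) {
    if (array->backing.empty()) {
      array->backing.resize(kPreallocatedArrayElements);  // preallocate
    } else if (key >= array->backing.size()) {
      return false;  // capacity exhausted: bail out to the runtime
    }
    array->length++;  // grow the array by one element
  }
  array->backing[key] = value;
  return true;
}

int main() {
  Array array;
  std::printf("%d %d %d\n",
              StoreWithGrowth(&array, 0, 10),   // empty array: preallocates
              StoreWithGrowth(&array, 1, 20),   // grows by one in capacity
              StoreWithGrowth(&array, 5, 30));  // out of range: runtime
}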
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan;
- Label have_double_value, not_nan;
+ Label miss_force_generic, transition_elements_kind, finish_store;
+ Label grow, slow, check_capacity;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3708,57 +3691,22 @@
// Check that the key is within bounds.
if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
__ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
}
- __ j(above_equal, &miss_force_generic);
// Handle smi values specially
- __ JumpIfSmi(rax, &smi_value, Label::kNear);
-
- __ CheckMap(rax,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rax, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- __ j(greater_equal, &maybe_nan, Label::kNear);
-
- __ bind(&not_nan);
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ bind(&have_double_value);
+ __ bind(&finish_store);
__ SmiToInteger32(rcx, rcx);
- __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
- xmm0);
- __ ret(0);
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ j(greater, &is_nan, Label::kNear);
- __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
- __ j(zero, &not_nan);
- __ bind(&is_nan);
- // Convert all NaNs to the same canonical NaN value when they are stored in
- // the double array.
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(xmm0, kScratchRegister);
- __ jmp(&have_double_value, Label::kNear);
-
- __ bind(&smi_value);
- // Value is a smi. convert to a double and store.
- // Preserve original value.
- __ SmiToInteger32(rdx, rax);
- __ push(rdx);
- __ fild_s(Operand(rsp, 0));
- __ pop(rdx);
- __ SmiToInteger32(rcx, rcx);
- __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+ __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+ &transition_elements_kind);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3766,6 +3714,77 @@
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ // Restore smi-tagging of rcx.
+ __ Integer32ToSmi(rcx, rcx);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element; anything else
+ // must be handled by the runtime. Flags are already set by the previous
+ // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(rax, &value_is_smi);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
+ __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+ masm->isolate()->factory()->fixed_double_array_map());
+ __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
+ Smi::FromInt(JSArray::kPreallocatedArrayElements));
+
+ // Install the new backing store in the JSArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+ __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
+ Smi::FromInt(1));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
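// Illustrative sketch of the NaN canonicalization the removed inline code
// performed and that the StoreNumberToDoubleElements helper is expected to
// cover now (the canonical bit pattern below is an assumption, not V8's exact
// encoding): every NaN is rewritten to one canonical quiet NaN before it is
// written into the double array, so arbitrary NaN payloads cannot collide with
// the bit pattern used to mark holes.
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

double CanonicalizeNaN(double value) {
  if (!std::isnan(value)) return value;
  const std::uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;  // quiet NaN
  double canonical;
  std::memcpy(&canonical, &kCanonicalNaN, sizeof(canonical));
  return canonical;
}

void StoreDoubleElement(double* elements, std::size_t index, double value) {
  elements[index] = CanonicalizeNaN(value);
}

int main() {
  double elements[2] = {0.0, 0.0};
  StoreDoubleElement(elements, 0, 1.5);
  StoreDoubleElement(elements, 1, std::nan("0x123"));  // arbitrary payload
  std::uint64_t bits;
  std::memcpy(&bits, &elements[1], sizeof(bits));
  std::printf("stored NaN bits: %016llx\n",
              static_cast<unsigned long long>(bits));
}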