Update V8 to version 4.1.0.21

This is a cherry-pick of all commits up to and including the
4.1.0.21 cherry-pick in Chromium.

Original commit message:

    Version 4.1.0.21 (cherry-pick)

    Merged 206e9136bde0f2b5ae8cb77afbb1e7833e5bd412

    Unlink pages from the space page list after evacuation.

    BUG=430201
    LOG=N
    R=jkummerow@chromium.org

    Review URL: https://codereview.chromium.org/953813002

    Cr-Commit-Position: refs/branch-heads/4.1@{#22}
    Cr-Branched-From: 2e08d2a7aa9d65d269d8c57aba82eb38a8cb0a18-refs/heads/candidates@{#25353}

---

FPIIM-449

Change-Id: I8c23c7bbb70772b4858fe8a47b64fa97ee0d1f8c
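
The merged fix unlinks evacuated pages from their owning space's page
list, presumably so that later phases no longer walk them; the diff below
covers only the x64 code-generator part of this roll, not that heap
change. A minimal sketch of the unlinking idea, assuming an intrusive
doubly linked page list (Page, PageList, and UnlinkEvacuated are
illustrative names, not V8's actual heap API):

    // Sketch only: illustrative types, not V8's heap classes.
    #include <cassert>

    struct Page {
      Page* prev = nullptr;
      Page* next = nullptr;
      bool evacuated = false;  // marked during evacuation
    };

    // An intrusive, doubly linked list of the pages owned by a space.
    struct PageList {
      Page* head = nullptr;

      void Push(Page* p) {
        p->next = head;
        if (head != nullptr) head->prev = p;
        head = p;
      }

      // Unlink one page so later sweeps never observe it.
      void Unlink(Page* p) {
        if (p->prev != nullptr) p->prev->next = p->next;
        if (p->next != nullptr) p->next->prev = p->prev;
        if (head == p) head = p->next;
        p->prev = p->next = nullptr;
      }

      // After evacuation, drop every evacuated page from the list.
      void UnlinkEvacuated() {
        for (Page* p = head; p != nullptr;) {
          Page* next = p->next;  // save before unlinking
          if (p->evacuated) Unlink(p);
          p = next;
        }
      }
    };

    int main() {
      Page a, b, c;
      b.evacuated = true;
      PageList list;
      list.Push(&a);
      list.Push(&b);
      list.Push(&c);
      list.UnlinkEvacuated();
      // b is gone; the list is now c -> a.
      assert(list.head == &c && c.next == &a && a.next == nullptr);
      return 0;
    }
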
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index f71d3bf..0480f9d 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -19,186 +19,487 @@
#define __ masm()->
-// TODO(turbofan): Cleanup these hacks.
-enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
-
-
-struct Immediate64 {
- uint64_t value;
- Handle<Object> handle;
- ExternalReference reference;
- Immediate64Type type;
-};
-
-
-enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
-
-
-struct RegisterOrOperand {
- RegisterOrOperand() : operand(no_reg, 0) {}
- Register reg;
- DoubleRegister double_reg;
- Operand operand;
- RegisterOrOperandType type;
-};
-
-
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
public:
X64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- RegisterOrOperand InputRegisterOrOperand(int index) {
- return ToRegisterOrOperand(instr_->InputAt(index));
- }
-
Immediate InputImmediate(int index) {
return ToImmediate(instr_->InputAt(index));
}
- RegisterOrOperand OutputRegisterOrOperand() {
- return ToRegisterOrOperand(instr_->Output());
- }
+ Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
- Immediate64 InputImmediate64(int index) {
- return ToImmediate64(instr_->InputAt(index));
- }
-
- Immediate64 ToImmediate64(InstructionOperand* operand) {
- Constant constant = ToConstant(operand);
- Immediate64 immediate;
- immediate.value = 0xbeefdeaddeefbeed;
- immediate.type = kImm64Value;
- switch (constant.type()) {
- case Constant::kInt32:
- case Constant::kInt64:
- immediate.value = constant.ToInt64();
- return immediate;
- case Constant::kFloat64:
- immediate.type = kImm64Handle;
- immediate.handle =
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
- return immediate;
- case Constant::kExternalReference:
- immediate.type = kImm64Reference;
- immediate.reference = constant.ToExternalReference();
- return immediate;
- case Constant::kHeapObject:
- immediate.type = kImm64Handle;
- immediate.handle = constant.ToHeapObject();
- return immediate;
- }
- UNREACHABLE();
- return immediate;
- }
+ Operand OutputOperand() { return ToOperand(instr_->Output()); }
Immediate ToImmediate(InstructionOperand* operand) {
- Constant constant = ToConstant(operand);
- switch (constant.type()) {
- case Constant::kInt32:
- return Immediate(constant.ToInt32());
- case Constant::kInt64:
- case Constant::kFloat64:
- case Constant::kExternalReference:
- case Constant::kHeapObject:
- break;
- }
- UNREACHABLE();
- return Immediate(-1);
+ return Immediate(ToConstant(operand).ToInt32());
}
Operand ToOperand(InstructionOperand* op, int extra = 0) {
- RegisterOrOperand result = ToRegisterOrOperand(op, extra);
- DCHECK_EQ(kOperand, result.type);
- return result.operand;
- }
-
- RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
- RegisterOrOperand result;
- if (op->IsRegister()) {
- DCHECK(extra == 0);
- result.type = kRegister;
- result.reg = ToRegister(op);
- return result;
- } else if (op->IsDoubleRegister()) {
- DCHECK(extra == 0);
- result.type = kDoubleRegister;
- result.double_reg = ToDoubleRegister(op);
- return result;
- }
-
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-
- result.type = kOperand;
// The linkage computes where all spill slots are located.
FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
- result.operand =
- Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
- return result;
+ return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
}
- Operand MemoryOperand(int* first_input) {
- const int offset = *first_input;
- switch (AddressingModeField::decode(instr_->opcode())) {
- case kMode_MR1I: {
- *first_input += 2;
- Register index = InputRegister(offset + 1);
- return Operand(InputRegister(offset + 0), index, times_1,
- 0); // TODO(dcarney): K != 0
+ static int NextOffset(int* offset) {
+ int i = *offset;
+ (*offset)++;
+ return i;
+ }
+
+ static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+ STATIC_ASSERT(0 == static_cast<int>(times_1));
+ STATIC_ASSERT(1 == static_cast<int>(times_2));
+ STATIC_ASSERT(2 == static_cast<int>(times_4));
+ STATIC_ASSERT(3 == static_cast<int>(times_8));
+ int scale = static_cast<int>(mode - one);
+ DCHECK(scale >= 0 && scale < 4);
+ return static_cast<ScaleFactor>(scale);
+ }
+
+ Operand MemoryOperand(int* offset) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ switch (mode) {
+ case kMode_MR: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = 0;
+ return Operand(base, disp);
}
- case kMode_MRI:
- *first_input += 2;
- return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
- default:
+ case kMode_MRI: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, disp);
+ }
+ case kMode_MR1:
+ case kMode_MR2:
+ case kMode_MR4:
+ case kMode_MR8: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+ int32_t disp = 0;
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_M1: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = 0;
+ return Operand(base, disp);
+ }
+ case kMode_M2:
+ UNREACHABLE(); // Should use kMode_MR with more compact encoding instead
+ return Operand(no_reg, 0);
+ case kMode_M4:
+ case kMode_M8: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1, mode);
+ int32_t disp = 0;
+ return Operand(index, scale, disp);
+ }
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(index, scale, disp);
+ }
+ case kMode_None:
UNREACHABLE();
return Operand(no_reg, 0);
}
+ UNREACHABLE();
+ return Operand(no_reg, 0);
}
- Operand MemoryOperand() {
- int first_input = 0;
+ Operand MemoryOperand(int first_input = 0) {
return MemoryOperand(&first_input);
}
};
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsImmediate();
}
-#define ASSEMBLE_BINOP(asm_instr) \
- do { \
- if (HasImmediateInput(instr, 1)) { \
- RegisterOrOperand input = i.InputRegisterOrOperand(0); \
- if (input.type == kRegister) { \
- __ asm_instr(input.reg, i.InputImmediate(1)); \
- } else { \
- __ asm_instr(input.operand, i.InputImmediate(1)); \
- } \
- } else { \
- RegisterOrOperand input = i.InputRegisterOrOperand(1); \
- if (input.type == kRegister) { \
- __ asm_instr(i.InputRegister(0), input.reg); \
- } else { \
- __ asm_instr(i.InputRegister(0), input.operand); \
- } \
- } \
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ xorl(result_, result_); }
+
+ private:
+ Register const result_;
+};
+
+
+class OutOfLineLoadNaN FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+ XMMRegister const result_;
+};
+
+
+class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+ public:
+ OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+ XMMRegister input)
+ : OutOfLineCode(gen), result_(result), input_(input) {}
+
+ void Generate() FINAL {
+ __ subp(rsp, Immediate(kDoubleSize));
+ __ movsd(MemOperand(rsp, 0), input_);
+ __ SlowTruncateToI(result_, rsp, 0);
+ __ addp(rsp, Immediate(kDoubleSize));
+ }
+
+ private:
+ Register const result_;
+ XMMRegister const input_;
+};
+
+} // namespace
+
+
+#define ASSEMBLE_UNOP(asm_instr) \
+ do { \
+ if (instr->Output()->IsRegister()) { \
+ __ asm_instr(i.OutputRegister()); \
+ } else { \
+ __ asm_instr(i.OutputOperand()); \
+ } \
} while (0)
-#define ASSEMBLE_SHIFT(asm_instr, width) \
- do { \
- if (HasImmediateInput(instr, 1)) { \
- __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
- } else { \
- __ asm_instr##_cl(i.OutputRegister()); \
- } \
+#define ASSEMBLE_BINOP(asm_instr) \
+ do { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
} while (0)
+#define ASSEMBLE_MULT(asm_instr) \
+ do { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
+ i.InputImmediate(1)); \
+ } \
+ } else { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
+ } else { \
+ __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \
+ } \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width) \
+ do { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->Output()->IsRegister()) { \
+ __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+ } else { \
+ __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1))); \
+ } \
+ } else { \
+ if (instr->Output()->IsRegister()) { \
+ __ asm_instr##_cl(i.OutputRegister()); \
+ } else { \
+ __ asm_instr##_cl(i.OutputOperand()); \
+ } \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_DOUBLE_BINOP(asm_instr) \
+ do { \
+ if (instr->InputAt(1)->IsDoubleRegister()) { \
+ __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr) \
+ do { \
+ CpuFeatureScope avx_scope(masm(), AVX); \
+ if (instr->InputAt(1)->IsDoubleRegister()) { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ } else { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputOperand(1)); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto buffer = i.InputRegister(0); \
+ auto index1 = i.InputRegister(1); \
+ auto index2 = i.InputInt32(2); \
+ OutOfLineCode* ool; \
+ if (instr->InputAt(3)->IsRegister()) { \
+ auto length = i.InputRegister(3); \
+ DCHECK_EQ(0, index2); \
+ __ cmpl(index1, length); \
+ ool = new (zone()) OutOfLineLoadNaN(this, result); \
+ } else { \
+ auto length = i.InputInt32(3); \
+ DCHECK_LE(index2, length); \
+ __ cmpq(index1, Immediate(length - index2)); \
+ class OutOfLineLoadFloat FINAL : public OutOfLineCode { \
+ public: \
+ OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
+ Register buffer, Register index1, int32_t index2, \
+ int32_t length) \
+ : OutOfLineCode(gen), \
+ result_(result), \
+ buffer_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length) {} \
+ \
+ void Generate() FINAL { \
+ __ leal(kScratchRegister, Operand(index1_, index2_)); \
+ __ pcmpeqd(result_, result_); \
+ __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ j(above_equal, exit()); \
+ __ asm_instr(result_, \
+ Operand(buffer_, kScratchRegister, times_1, 0)); \
+ } \
+ \
+ private: \
+ XMMRegister const result_; \
+ Register const buffer_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ }; \
+ ool = new (zone()) \
+ OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
+ } \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto buffer = i.InputRegister(0); \
+ auto index1 = i.InputRegister(1); \
+ auto index2 = i.InputInt32(2); \
+ OutOfLineCode* ool; \
+ if (instr->InputAt(3)->IsRegister()) { \
+ auto length = i.InputRegister(3); \
+ DCHECK_EQ(0, index2); \
+ __ cmpl(index1, length); \
+ ool = new (zone()) OutOfLineLoadZero(this, result); \
+ } else { \
+ auto length = i.InputInt32(3); \
+ DCHECK_LE(index2, length); \
+ __ cmpq(index1, Immediate(length - index2)); \
+ class OutOfLineLoadInteger FINAL : public OutOfLineCode { \
+ public: \
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
+ Register buffer, Register index1, int32_t index2, \
+ int32_t length) \
+ : OutOfLineCode(gen), \
+ result_(result), \
+ buffer_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length) {} \
+ \
+ void Generate() FINAL { \
+ Label oob; \
+ __ leal(kScratchRegister, Operand(index1_, index2_)); \
+ __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ j(above_equal, &oob, Label::kNear); \
+ __ asm_instr(result_, \
+ Operand(buffer_, kScratchRegister, times_1, 0)); \
+ __ jmp(exit()); \
+ __ bind(&oob); \
+ __ xorl(result_, result_); \
+ } \
+ \
+ private: \
+ Register const result_; \
+ Register const buffer_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ }; \
+ ool = new (zone()) \
+ OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
+ } \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto index1 = i.InputRegister(1); \
+ auto index2 = i.InputInt32(2); \
+ auto value = i.InputDoubleRegister(4); \
+ if (instr->InputAt(3)->IsRegister()) { \
+ auto length = i.InputRegister(3); \
+ DCHECK_EQ(0, index2); \
+ Label done; \
+ __ cmpl(index1, length); \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
+ __ bind(&done); \
+ } else { \
+ auto length = i.InputInt32(3); \
+ DCHECK_LE(index2, length); \
+ __ cmpq(index1, Immediate(length - index2)); \
+ class OutOfLineStoreFloat FINAL : public OutOfLineCode { \
+ public: \
+ OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
+ Register index1, int32_t index2, int32_t length, \
+ XMMRegister value) \
+ : OutOfLineCode(gen), \
+ buffer_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ value_(value) {} \
+ \
+ void Generate() FINAL { \
+ __ leal(kScratchRegister, Operand(index1_, index2_)); \
+ __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ j(above_equal, exit()); \
+ __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
+ value_); \
+ } \
+ \
+ private: \
+ Register const buffer_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ XMMRegister const value_; \
+ }; \
+ auto ool = new (zone()) \
+ OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
+ __ bind(ool->exit()); \
+ } \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto index1 = i.InputRegister(1); \
+ auto index2 = i.InputInt32(2); \
+ if (instr->InputAt(3)->IsRegister()) { \
+ auto length = i.InputRegister(3); \
+ DCHECK_EQ(0, index2); \
+ Label done; \
+ __ cmpl(index1, length); \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
+ __ bind(&done); \
+ } else { \
+ auto length = i.InputInt32(3); \
+ DCHECK_LE(index2, length); \
+ __ cmpq(index1, Immediate(length - index2)); \
+ class OutOfLineStoreInteger FINAL : public OutOfLineCode { \
+ public: \
+ OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
+ Register index1, int32_t index2, int32_t length, \
+ Value value) \
+ : OutOfLineCode(gen), \
+ buffer_(buffer), \
+ index1_(index1), \
+ index2_(index2), \
+ length_(length), \
+ value_(value) {} \
+ \
+ void Generate() FINAL { \
+ __ leal(kScratchRegister, Operand(index1_, index2_)); \
+ __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ j(above_equal, exit()); \
+ __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
+ value_); \
+ } \
+ \
+ private: \
+ Register const buffer_; \
+ Register const index1_; \
+ int32_t const index2_; \
+ int32_t const length_; \
+ Value const value_; \
+ }; \
+ auto ool = new (zone()) \
+ OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
+ __ bind(ool->exit()); \
+ } \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ if (instr->InputAt(4)->IsRegister()) { \
+ Register value = i.InputRegister(4); \
+ ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
+ } else { \
+ Immediate value = i.InputImmediate(4); \
+ ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
+ } \
+ } while (false)
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
@@ -230,7 +531,7 @@
break;
}
case kArchJmp:
- __ jmp(code_->GetLabel(i.InputBlock(0)));
+ AssembleArchJump(i.InputRpo(0));
break;
case kArchNop:
// don't emit code for nops.
@@ -238,9 +539,19 @@
case kArchRet:
AssembleReturn();
break;
- case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ case kArchStackPointer:
+ __ movq(i.OutputRegister(), rsp);
break;
+ case kArchTruncateDoubleToI: {
+ auto result = i.OutputRegister();
+ auto input = i.InputDoubleRegister(0);
+ auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+ __ cvttsd2siq(result, input);
+ __ cmpq(result, Immediate(1));
+ __ j(overflow, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -272,39 +583,23 @@
ASSEMBLE_BINOP(testq);
break;
case kX64Imul32:
- if (HasImmediateInput(instr, 1)) {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
- } else {
- __ movq(kScratchRegister, input.operand);
- __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
- }
- } else {
- RegisterOrOperand input = i.InputRegisterOrOperand(1);
- if (input.type == kRegister) {
- __ imull(i.OutputRegister(), input.reg);
- } else {
- __ imull(i.OutputRegister(), input.operand);
- }
- }
+ ASSEMBLE_MULT(imull);
break;
case kX64Imul:
- if (HasImmediateInput(instr, 1)) {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
- } else {
- __ movq(kScratchRegister, input.operand);
- __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
- }
+ ASSEMBLE_MULT(imulq);
+ break;
+ case kX64ImulHigh32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ imull(i.InputRegister(1));
} else {
- RegisterOrOperand input = i.InputRegisterOrOperand(1);
- if (input.type == kRegister) {
- __ imulq(i.OutputRegister(), input.reg);
- } else {
- __ imulq(i.OutputRegister(), input.operand);
- }
+ __ imull(i.InputOperand(1));
+ }
+ break;
+ case kX64UmulHigh32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ mull(i.InputRegister(1));
+ } else {
+ __ mull(i.InputOperand(1));
}
break;
case kX64Idiv32:
@@ -323,42 +618,18 @@
__ xorq(rdx, rdx);
__ divq(i.InputRegister(1));
break;
- case kX64Not: {
- RegisterOrOperand output = i.OutputRegisterOrOperand();
- if (output.type == kRegister) {
- __ notq(output.reg);
- } else {
- __ notq(output.operand);
- }
+ case kX64Not:
+ ASSEMBLE_UNOP(notq);
break;
- }
- case kX64Not32: {
- RegisterOrOperand output = i.OutputRegisterOrOperand();
- if (output.type == kRegister) {
- __ notl(output.reg);
- } else {
- __ notl(output.operand);
- }
+ case kX64Not32:
+ ASSEMBLE_UNOP(notl);
break;
- }
- case kX64Neg: {
- RegisterOrOperand output = i.OutputRegisterOrOperand();
- if (output.type == kRegister) {
- __ negq(output.reg);
- } else {
- __ negq(output.operand);
- }
+ case kX64Neg:
+ ASSEMBLE_UNOP(negq);
break;
- }
- case kX64Neg32: {
- RegisterOrOperand output = i.OutputRegisterOrOperand();
- if (output.type == kRegister) {
- __ negl(output.reg);
- } else {
- __ negl(output.operand);
- }
+ case kX64Neg32:
+ ASSEMBLE_UNOP(negl);
break;
- }
case kX64Or32:
ASSEMBLE_BINOP(orl);
break;
@@ -395,26 +666,20 @@
case kX64Ror:
ASSEMBLE_SHIFT(rorq, 6);
break;
- case kSSEFloat64Cmp: {
- RegisterOrOperand input = i.InputRegisterOrOperand(1);
- if (input.type == kDoubleRegister) {
- __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
- } else {
- __ ucomisd(i.InputDoubleRegister(0), input.operand);
- }
+ case kSSEFloat64Cmp:
+ ASSEMBLE_DOUBLE_BINOP(ucomisd);
break;
- }
case kSSEFloat64Add:
- __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ ASSEMBLE_DOUBLE_BINOP(addsd);
break;
case kSSEFloat64Sub:
- __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ ASSEMBLE_DOUBLE_BINOP(subsd);
break;
case kSSEFloat64Mul:
- __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ ASSEMBLE_DOUBLE_BINOP(mulsd);
break;
case kSSEFloat64Div:
- __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ ASSEMBLE_DOUBLE_BINOP(divsd);
break;
case kSSEFloat64Mod: {
__ subq(rsp, Immediate(kDoubleSize));
@@ -431,7 +696,8 @@
__ fprem();
// The following two instructions implicitly use rax.
__ fnstsw_ax();
- if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+ if (CpuFeatures::IsSupported(SAHF)) {
+ CpuFeatureScope sahf_scope(masm(), SAHF);
__ sahf();
} else {
__ shrl(rax, Immediate(8));
@@ -447,52 +713,97 @@
__ addq(rsp, Immediate(kDoubleSize));
break;
}
- case kSSEFloat64Sqrt: {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kDoubleRegister) {
- __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+ case kSSEFloat64Sqrt:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
} else {
- __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+ __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
- }
- case kSSEFloat64ToInt32: {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kDoubleRegister) {
- __ cvttsd2si(i.OutputRegister(), input.double_reg);
- } else {
- __ cvttsd2si(i.OutputRegister(), input.operand);
- }
+ case kSSEFloat64Floor: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundDown);
break;
}
+ case kSSEFloat64Ceil: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundUp);
+ break;
+ }
+ case kSSEFloat64RoundTruncate: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::kRoundToZero);
+ break;
+ }
+ case kSSECvtss2sd:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSECvtsd2ss:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEFloat64ToInt32:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kSSEFloat64ToUint32: {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kDoubleRegister) {
- __ cvttsd2siq(i.OutputRegister(), input.double_reg);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2siq(i.OutputRegister(), input.operand);
+ __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
}
- __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits.
- // TODO(turbofan): generated code should not look at the upper 32 bits
- // of the result, but those bits could escape to the outside world.
+ __ AssertZeroExtended(i.OutputRegister());
break;
}
- case kSSEInt32ToFloat64: {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
+ case kSSEInt32ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
+ __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
- }
- case kSSEUint32ToFloat64: {
- // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
- __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ case kSSEUint32ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movl(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movl(kScratchRegister, i.InputOperand(0));
+ }
+ __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
- }
+ case kAVXFloat64Add:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
+ break;
+ case kAVXFloat64Sub:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
+ break;
+ case kAVXFloat64Mul:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
+ break;
+ case kAVXFloat64Div:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
+ break;
case kX64Movsxbl:
- __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+ if (instr->addressing_mode() != kMode_None) {
+ __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+ } else if (instr->InputAt(0)->IsRegister()) {
+ __ movsxbl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ movsxbl(i.OutputRegister(), i.InputOperand(0));
+ }
+ __ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
__ movzxbl(i.OutputRegister(), i.MemoryOperand());
@@ -508,10 +819,18 @@
break;
}
case kX64Movsxwl:
- __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+ if (instr->addressing_mode() != kMode_None) {
+ __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+ } else if (instr->InputAt(0)->IsRegister()) {
+ __ movsxwl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ movsxwl(i.OutputRegister(), i.InputOperand(0));
+ }
+ __ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
__ movzxwl(i.OutputRegister(), i.MemoryOperand());
+ __ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
int index = 0;
@@ -526,15 +845,15 @@
case kX64Movl:
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ movl(i.OutputRegister(), input.reg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movl(i.OutputRegister(), i.InputRegister(0));
} else {
- __ movl(i.OutputRegister(), input.operand);
+ __ movl(i.OutputRegister(), i.InputOperand(0));
}
} else {
__ movl(i.OutputRegister(), i.MemoryOperand());
}
+ __ AssertZeroExtended(i.OutputRegister());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -546,11 +865,10 @@
}
break;
case kX64Movsxlq: {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ movsxlq(i.OutputRegister(), input.reg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movsxlq(i.OutputRegister(), i.InputRegister(0));
} else {
- __ movsxlq(i.OutputRegister(), input.operand);
+ __ movsxlq(i.OutputRegister(), i.InputOperand(0));
}
break;
}
@@ -570,12 +888,10 @@
case kX64Movss:
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
- __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
- __ movss(operand, xmm0);
+ __ movss(operand, i.InputDoubleRegister(index));
}
break;
case kX64Movsd:
@@ -587,15 +903,57 @@
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
+ case kX64Lea32: {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+ // and addressing mode just happens to work out. The "addl"/"subl" forms
+ // in these cases are faster based on measurements.
+ if (i.InputRegister(0).is(i.OutputRegister())) {
+ if (mode == kMode_MRI) {
+ int32_t constant_summand = i.InputInt32(1);
+ if (constant_summand > 0) {
+ __ addl(i.OutputRegister(), Immediate(constant_summand));
+ } else if (constant_summand < 0) {
+ __ subl(i.OutputRegister(), Immediate(-constant_summand));
+ }
+ } else if (mode == kMode_MR1) {
+ if (i.InputRegister(1).is(i.OutputRegister())) {
+ __ shll(i.OutputRegister(), Immediate(1));
+ } else {
+ __ leal(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else if (mode == kMode_M2) {
+ __ shll(i.OutputRegister(), Immediate(1));
+ } else if (mode == kMode_M4) {
+ __ shll(i.OutputRegister(), Immediate(2));
+ } else if (mode == kMode_M8) {
+ __ shll(i.OutputRegister(), Immediate(3));
+ } else {
+ __ leal(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else {
+ __ leal(i.OutputRegister(), i.MemoryOperand());
+ }
+ __ AssertZeroExtended(i.OutputRegister());
+ break;
+ }
+ case kX64Lea:
+ __ leaq(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX64Dec32:
+ __ decl(i.OutputRegister());
+ break;
+ case kX64Inc32:
+ __ incl(i.OutputRegister());
+ break;
case kX64Push:
if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
} else {
- RegisterOrOperand input = i.InputRegisterOrOperand(0);
- if (input.type == kRegister) {
- __ pushq(input.reg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ pushq(i.InputRegister(0));
} else {
- __ pushq(input.operand);
+ __ pushq(i.InputOperand(0));
}
}
break;
@@ -606,31 +964,59 @@
__ movsxlq(index, index);
__ movq(Operand(object, index, times_1, 0), value);
__ leaq(index, Operand(object, index, times_1, 0));
- SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
- ? kSaveFPRegs
- : kDontSaveFPRegs;
+ SaveFPRegsMode mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
__ RecordWrite(object, index, value, mode);
break;
}
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movb);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movw);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movl);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ break;
}
}
// Assembles branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
- FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
X64OperandConverter i(this, instr);
- Label done;
-
- // Emit a branch. The true and false targets are always the last two inputs
- // to the instruction.
- BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
- BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
- bool fallthru = IsNextInAssemblyOrder(fblock);
- Label* tlabel = code()->GetLabel(tblock);
- Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
- Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
- switch (condition) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ switch (branch->condition) {
case kUnorderedEqual:
__ j(parity_even, flabel, flabel_distance);
// Fall through.
@@ -686,8 +1072,12 @@
__ j(no_overflow, tlabel);
break;
}
- if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
- __ bind(&done);
+ if (!branch->fallthru) __ jmp(flabel, flabel_distance);
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
@@ -700,7 +1090,7 @@
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
Condition cc = no_condition;
switch (condition) {
@@ -802,27 +1192,10 @@
frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
- Label ok;
- StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
- __ movp(rcx, args.GetReceiverOperand());
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &ok, Label::kNear);
- __ movp(rcx, GlobalObjectOperand());
- __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
- __ movp(args.GetReceiverOperand(), rcx);
- __ bind(&ok);
- }
-
} else {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
@@ -899,31 +1272,57 @@
}
} else if (source->IsConstant()) {
ConstantOperand* constant_source = ConstantOperand::cast(source);
+ Constant src = g.ToConstant(constant_source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
- Immediate64 imm = g.ToImmediate64(constant_source);
- switch (imm.type) {
- case kImm64Value:
- __ Set(dst, imm.value);
+ switch (src.type()) {
+ case Constant::kInt32:
+ // TODO(dcarney): don't need scratch in this case.
+ __ Set(dst, src.ToInt32());
break;
- case kImm64Reference:
- __ Move(dst, imm.reference);
+ case Constant::kInt64:
+ __ Set(dst, src.ToInt64());
break;
- case kImm64Handle:
- __ Move(dst, imm.handle);
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ Move(dst, src.ToExternalReference());
+ break;
+ case Constant::kHeapObject:
+ __ Move(dst, src.ToHeapObject());
+ break;
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): load of labels on x64.
break;
}
if (destination->IsStackSlot()) {
__ movq(g.ToOperand(destination), kScratchRegister);
}
- } else {
- __ movq(kScratchRegister,
- bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
+ } else if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
if (destination->IsDoubleRegister()) {
- __ movq(g.ToDoubleRegister(destination), kScratchRegister);
+ __ Move(g.ToDoubleRegister(destination), src_const);
} else {
DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ movl(dst, Immediate(src_const));
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
+ if (destination->IsDoubleRegister()) {
+ __ Move(g.ToDoubleRegister(destination), src_const);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ movq(kScratchRegister, src_const);
__ movq(g.ToOperand(destination), kScratchRegister);
}
}
@@ -985,7 +1384,7 @@
__ movsd(xmm0, src);
__ movsd(src, dst);
__ movsd(dst, xmm0);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
@@ -1005,7 +1404,7 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();