Upgrade to 3.29
Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.
Bug: 17370214
Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
new file mode 100644
index 0000000..1ec174d
--- /dev/null
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -0,0 +1,876 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm/macro-assembler-arm.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r9
+
+
+// Adds Arm-specific methods to convert InstructionOperands.
+class ArmOperandConverter : public InstructionOperandConverter {
+ public:
+  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  // Translates the instruction's flags mode into the assembler S bit:
+  // flag-producing modes (branch/set) request SetCC, kFlags_none leaves the
+  // condition codes untouched (LeaveCC).
+  SBit OutputSBit() const {
+    switch (instr_->flags_mode()) {
+      case kFlags_branch:
+      case kFlags_set:
+        return SetCC;
+      case kFlags_none:
+        return LeaveCC;
+    }
+    UNREACHABLE();
+    return LeaveCC;
+  }
+
+  // Converts the constant at input |index| into an immediate Operand.
+  // Only kInt32 and kFloat64 (materialized as a tenured heap number) are
+  // supported here; other constant kinds are unreachable for this use.
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  // Builds the flexible second operand (ARM "Operand2") from the inputs
+  // starting at |first_index|, according to the addressing mode encoded in
+  // the opcode: an immediate, a plain register, or a register shifted by an
+  // immediate or by another register (ASR/LSL/LSR/ROR).
+  Operand InputOperand2(int first_index) {
+    const int index = first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Offset_RI:
+      case kMode_Offset_RR:
+        break;
+      case kMode_Operand2_I:
+        return InputImmediate(index + 0);
+      case kMode_Operand2_R:
+        return Operand(InputRegister(index + 0));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_R:
+        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSL_R:
+        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_R:
+        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
+      case kMode_Operand2_R_ROR_R:
+        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  // Builds a MemOperand from the inputs at |*first_index| for the
+  // Offset_RI ([reg + imm]) and Offset_RR ([reg + reg]) modes, advancing
+  // |*first_index| past the two consumed inputs so callers can read any
+  // trailing inputs (e.g. the value register of a store).
+  MemOperand InputOffset(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Operand2_I:
+      case kMode_Operand2_R:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ASR_R:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSL_R:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_LSR_R:
+      case kMode_Operand2_R_ROR_I:
+      case kMode_Operand2_R_ROR_R:
+        break;
+      case kMode_Offset_RI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_Offset_RR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  // Convenience overload: decode the offset starting at input 0 when the
+  // caller does not need the advanced index.
+  MemOperand InputOffset() {
+    int index = 0;
+    return InputOffset(&index);
+  }
+
+  // Maps a (double) stack-slot operand to its frame location, using the
+  // spill-slot layout computed by the linkage; the base is either SP or FP
+  // depending on where the offset is anchored.
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+// Assembles an instruction after register allocation, producing machine code.
+// Dispatches on the architecture opcode; the DCHECKs on OutputSBit() record
+// which opcodes are expected to set / leave the condition flags.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  ArmOperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    // Call to a code object: either a known handle (immediate) or a code
+    // object in a register, entered past the Code header.
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ add(ip, i.InputRegister(0),
+               Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Call(ip);
+      }
+      AddSafepointAndDeopt(instr);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    // Call to a JSFunction through its code entry field.
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+      AddSafepointAndDeopt(instr);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArchJmp:
+      __ b(code_->GetLabel(i.InputBlock(0)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchRet:
+      AssembleReturn();
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    // Integer data-processing instructions. The flexible second operand is
+    // decoded via InputOperand2; the S bit comes from the flags mode.
+    case kArmAdd:
+      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmAnd:
+      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+              i.OutputSBit());
+      break;
+    case kArmBic:
+      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmMul:
+      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.OutputSBit());
+      break;
+    case kArmMla:
+      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2), i.OutputSBit());
+      break;
+    // mls/sdiv/udiv are not available on all ARM variants, hence the
+    // CpuFeatureScope guards.
+    case kArmMls: {
+      CpuFeatureScope scope(masm(), MLS);
+      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmSdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmMov:
+      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmMvn:
+      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmOrr:
+      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmEor:
+      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmSub:
+      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmRsb:
+      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    // Bit-field operations require ARMv7.
+    case kArmBfc: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUbfx: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    // Compare/test instructions always set the flags.
+    case kArmCmp:
+      __ cmp(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmCmn:
+      __ cmn(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTst:
+      __ tst(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTeq:
+      __ teq(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    // VFP double-precision arithmetic.
+    case kArmVcmpF64:
+      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+                               i.InputDoubleRegister(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVaddF64:
+      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVsubF64:
+      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmulF64:
+      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    // vmla/vmls accumulate into the output register (inputs start at 1).
+    case kArmVmlaF64:
+      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlsF64:
+      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVdivF64:
+      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    // Double modulus has no single instruction; call the C runtime helper.
+    case kArmVmodF64: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVnegF64:
+      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArmVsqrtF64:
+      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    // Int <-> double conversions go through the low half of the scratch
+    // double register as a single-precision staging register.
+    case kArmVcvtF64S32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF64U32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtS32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtU32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    // Loads/stores. Stores decode the MemOperand first so that |index|
+    // points past the address inputs at the value register.
+    case kArmLdrb:
+      __ ldrb(i.OutputRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmLdrsb:
+      __ ldrsb(i.OutputRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmStrb: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strb(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdrh:
+      __ ldrh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmLdrsh:
+      __ ldrsh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStrh: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strh(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdr:
+      __ ldr(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStr: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ str(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    // Float32 memory accesses convert to/from float64 via the scratch
+    // single-precision register.
+    case kArmVldr32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vldr(scratch, i.InputOffset());
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVstr32: {
+      int index = 0;
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      MemOperand operand = i.InputOffset(&index);
+      __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
+      __ vstr(scratch, operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVldr64:
+      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVstr64: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ vstr(i.InputDoubleRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmPush:
+      __ Push(i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    // Store with GC write barrier. NOTE(review): |index| is clobbered (used
+    // as the absolute address) and lr is assumed not yet saved here.
+    case kArmStoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ add(index, object, index);
+      __ str(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      __ RecordWrite(object, index, value, lr_status, mode);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+// For the kUnordered* conditions the V flag (vs) distinguishes an unordered
+// (NaN) result of the preceding VFP compare; those cases first branch on vs
+// and then deliberately fall through to the corresponding ordered case.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  // If the false block immediately follows, branch to the local |done|
+  // label instead of emitting an explicit branch to it.
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ b(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ b(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ b(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ b(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ b(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ b(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ b(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ b(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ b(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ b(hi, tlabel);
+      break;
+    case kOverflow:
+      __ b(vs, tlabel);
+      break;
+    case kNotOverflow:
+      __ b(vc, tlabel);
+      break;
+  }
+  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+// The kUnordered* cases handle a NaN result of the preceding VFP compare:
+// when the flags are ordered (vc) they jump to |check| and use the ordered
+// condition; otherwise they materialize the fixed unordered answer (0 or 1)
+// directly and skip to |done|. The trailing fallthroughs into the ordered
+// cases only set |cc| and are intentional.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+    case kOverflow:
+      cc = vs;
+      break;
+    case kNotOverflow:
+      cc = vc;
+      break;
+  }
+  // Default to 0, then conditionally overwrite with 1 when |cc| holds.
+  __ bind(&check);
+  __ mov(reg, Operand(0));
+  __ mov(reg, Operand(1), LeaveCC, cc);
+  __ bind(&done);
+}
+
+
+// Emits a call into the lazy deoptimization entry for |deoptimization_id|.
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// Emits the frame-building prologue. The shape depends on the incoming call
+// descriptor: a C entry frame (kCallAddress) with callee-saved registers, a
+// full JS function frame, or a stub frame. Finally reserves the spill slots.
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    bool saved_pp;
+    if (FLAG_enable_ool_constant_pool) {
+      // With an out-of-line constant pool, pp is part of the fixed frame.
+      __ Push(lr, fp, pp);
+      // Adjust FP to point to saved FP.
+      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      saved_pp = true;
+    } else {
+      __ Push(lr, fp);
+      __ mov(fp, sp);
+      saved_pp = false;
+    }
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0 || saved_pp) {
+      // Save callee-saved registers.
+      int register_save_area_size = saved_pp ? kPointerSize : 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ stm(db_w, sp, saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+      __ b(ne, &ok);
+      __ ldr(r2, GlobalObjectOperand());
+      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  // Reserve space for the spill slots computed by register allocation.
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+// Emits the epilogue, mirroring AssemblePrologue: pops spill slots and
+// callee-saved registers for C entry frames, or drops the JS parameters for
+// JS function calls, then tears down the frame and returns.
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ ldm(ia_w, sp, saves);
+      }
+    }
+    __ LeaveFrame(StackFrame::MANUAL);
+    __ Ret();
+  } else {
+    __ LeaveFrame(StackFrame::MANUAL);
+    // JS calls pop their receiver/arguments; stubs pop nothing.
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+// Assembles a parallel-move element: copies |source| to |destination|.
+// Handles register/stack-slot/constant sources for both general and double
+// operands; memory-to-memory moves go through the scratch registers.
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ str(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ldr(g.ToRegister(destination), src);
+    } else {
+      // Slot-to-slot: bounce through the scratch register.
+      Register temp = kScratchReg;
+      __ ldr(temp, src);
+      __ str(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      // Materialize into the destination register, or into the scratch
+      // register followed by a store for stack-slot destinations.
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      Constant src = g.ToConstant(source);
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          UNREACHABLE();
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ Move(dst, src.ToHeapObject());
+          break;
+      }
+      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
+    } else if (destination->IsDoubleRegister()) {
+      DwVfpRegister result = g.ToDoubleRegister(destination);
+      __ vmov(result, g.ToDouble(source));
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vmov(temp, g.ToDouble(source));
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ vstr(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ vldr(g.ToDoubleRegister(destination), src);
+    } else {
+      // Double slot-to-slot: bounce through the scratch double register.
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vldr(temp, src);
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+// Assembles a parallel-move swap: exchanges the contents of |source| and
+// |destination| using the scratch register(s) as temporaries.
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ ldr(src, dst);
+      __ str(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    // Slot-slot swap: uses the general scratch register plus the low half
+    // of the scratch double register as a second temporary.
+    Register temp_0 = kScratchReg;
+    SwVfpRegister temp_1 = kScratchDoubleReg.low();
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ ldr(temp_0, src);
+    __ vldr(temp_1, dst);
+    __ str(temp_0, dst);
+    __ vstr(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister temp = kScratchDoubleReg;
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ vldr(src, dst);
+      __ vstr(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    // Double slot-slot swap: the destination double is parked in temp_1
+    // while the source is copied word-by-word over the destination.
+    Register temp_0 = kScratchReg;
+    DwVfpRegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+    __ vldr(temp_1, dst0);  // Save destination in temp_1.
+    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+    __ str(temp_0, dst0);
+    __ ldr(temp_0, src1);
+    __ str(temp_0, dst1);
+    __ vstr(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+// Intentionally empty on this architecture.
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // On 32-bit ARM we do not insert nops for inlined Smi code.
+}
+
+
+// Pads the instruction stream with nops so that at least patch_size() bytes
+// separate this point from the previous lazy-deopt site, guaranteeing the
+// deoptimizer can later patch a call in without overlapping it.
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block literal pool emission for duration of padding.
+      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
new file mode 100644
index 0000000..7849ca9
--- /dev/null
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+// Each entry corresponds to a kArm* case handled by AssembleArchInstruction
+// in code-generator-arm.cc. (No comments inside the list: a // comment would
+// swallow the line-continuation backslash.)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(ArmAdd)                        \
+  V(ArmAnd)                        \
+  V(ArmBic)                        \
+  V(ArmCmp)                        \
+  V(ArmCmn)                        \
+  V(ArmTst)                        \
+  V(ArmTeq)                        \
+  V(ArmOrr)                        \
+  V(ArmEor)                        \
+  V(ArmSub)                        \
+  V(ArmRsb)                        \
+  V(ArmMul)                        \
+  V(ArmMla)                        \
+  V(ArmMls)                        \
+  V(ArmSdiv)                       \
+  V(ArmUdiv)                       \
+  V(ArmMov)                        \
+  V(ArmMvn)                        \
+  V(ArmBfc)                        \
+  V(ArmUbfx)                       \
+  V(ArmVcmpF64)                    \
+  V(ArmVaddF64)                    \
+  V(ArmVsubF64)                    \
+  V(ArmVmulF64)                    \
+  V(ArmVmlaF64)                    \
+  V(ArmVmlsF64)                    \
+  V(ArmVdivF64)                    \
+  V(ArmVmodF64)                    \
+  V(ArmVnegF64)                    \
+  V(ArmVsqrtF64)                   \
+  V(ArmVcvtF64S32)                 \
+  V(ArmVcvtF64U32)                 \
+  V(ArmVcvtS32F64)                 \
+  V(ArmVcvtU32F64)                 \
+  V(ArmVldr32)                     \
+  V(ArmVstr32)                     \
+  V(ArmVldr64)                     \
+  V(ArmVstr64)                     \
+  V(ArmLdrb)                       \
+  V(ArmLdrsb)                      \
+  V(ArmStrb)                       \
+  V(ArmLdrh)                       \
+  V(ArmLdrsh)                      \
+  V(ArmStrh)                       \
+  V(ArmLdr)                        \
+  V(ArmStr)                        \
+  V(ArmPush)                       \
+  V(ArmStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+// Offset_* modes feed MemOperand construction (InputOffset); Operand2_*
+// modes feed ARM's flexible second operand (InputOperand2).
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(Offset_RI)        /* [%r0 + K] */   \
+  V(Offset_RR)        /* [%r0 + %r1] */ \
+  V(Operand2_I)       /* K */           \
+  V(Operand2_R)       /* %r0 */         \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ROR_I) /* %r0 ROR K */   \
+  V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
+  V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
+  V(Operand2_R_LSR_R) /* %r0 LSR %r1 */ \
+  V(Operand2_R_ROR_R) /* %r0 ROR %r1 */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
diff --git a/src/compiler/arm/instruction-selector-arm-unittest.cc b/src/compiler/arm/instruction-selector-arm-unittest.cc
new file mode 100644
index 0000000..208d2e9
--- /dev/null
+++ b/src/compiler/arm/instruction-selector-arm-unittest.cc
@@ -0,0 +1,1900 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+// Pointer-to-member type for the binary RawMachineAssembler node builders
+// (e.g. &RawMachineAssembler::Int32Add) exercised by the tests below.
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
+
+
+// Data processing instructions.
+// One row per binary machine operator, together with the opcodes the
+// instruction selector is expected to emit for it.
+struct DPI {
+  Constructor constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;          // expected opcode for (left, right)
+  ArchOpcode reverse_arch_opcode;  // expected opcode when the operands are
+                                   // swapped (e.g. Sub -> Rsb)
+  ArchOpcode test_arch_opcode;     // expected flag-setting form when only the
+                                   // condition flags are consumed (branches)
+};
+
+
+// Pretty-printer so gtest labels each parameterized case by operator name.
+std::ostream& operator<<(std::ostream& os, const DPI& dpi) {
+  return os << dpi.constructor_name;
+}
+
+
+static const DPI kDPIs[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmAdd, kArmCmn},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
+
+
+// Data processing instructions with overflow.
+// Same idea as DPI, but for the *WithOverflow operators, which produce a
+// value (projection 0) and an overflow flag (projection 1).
+struct ODPI {
+  Constructor constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;          // expected opcode for (left, right)
+  ArchOpcode reverse_arch_opcode;  // expected opcode when operands are swapped
+};
+
+
+// Pretty-printer so gtest labels each parameterized case by operator name.
+std::ostream& operator<<(std::ostream& os, const ODPI& odpi) {
+  return os << odpi.constructor_name;
+}
+
+
+static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
+                               "Int32AddWithOverflow", kArmAdd, kArmAdd},
+                              {&RawMachineAssembler::Int32SubWithOverflow,
+                               "Int32SubWithOverflow", kArmSub, kArmRsb}};
+
+
+// Shifts.
+// Describes one shift operator and the addressing modes the selector is
+// expected to fold it into when it feeds a data-processing instruction.
+struct Shift {
+  Constructor constructor;
+  const char* constructor_name;
+  int32_t i_low;          // lowest possible immediate
+  int32_t i_high;         // highest possible immediate
+  AddressingMode i_mode;  // Operand2_R_<shift>_I
+  AddressingMode r_mode;  // Operand2_R_<shift>_R
+};
+
+
+// Pretty-printer so gtest labels each parameterized case by shift name.
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+  return os << shift.constructor_name;
+}
+
+
+static const Shift kShifts[] = {
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
+     kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
+     kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
+     kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
+     kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
+
+
+// Immediates (random subset).
+// Every value here is expected to be accepted as an ARM Operand2 immediate
+// by the instruction selector: the *Immediate tests below assert
+// kMode_Operand2_I for each entry.
+static const int32_t kImmediates[] = {
+    -2147483617, -2147483606, -2113929216, -2080374784, -1996488704,
+    -1879048192, -1459617792, -1358954496, -1342177265, -1275068414,
+    -1073741818, -1073741777, -855638016, -805306368, -402653184,
+    -268435444, -16777216, 0, 35, 61,
+    105, 116, 171, 245, 255,
+    692, 1216, 1248, 1520, 1600,
+    1888, 3744, 4080, 5888, 8384,
+    9344, 9472, 9792, 13312, 15040,
+    15360, 20736, 22272, 23296, 32000,
+    33536, 37120, 45824, 47872, 56320,
+    59392, 65280, 72704, 101376, 147456,
+    161792, 164864, 167936, 173056, 195584,
+    209920, 212992, 356352, 655360, 704512,
+    716800, 851968, 901120, 1044480, 1523712,
+    2572288, 3211264, 3588096, 3833856, 3866624,
+    4325376, 5177344, 6488064, 7012352, 7471104,
+    14090240, 16711680, 19398656, 22282240, 28573696,
+    30408704, 30670848, 43253760, 54525952, 55312384,
+    56623104, 68157440, 115343360, 131072000, 187695104,
+    188743680, 195035136, 197132288, 203423744, 218103808,
+    267386880, 268435470, 285212672, 402653185, 415236096,
+    595591168, 603979776, 603979778, 629145600, 1073741835,
+    1073741855, 1073741861, 1073741884, 1157627904, 1476395008,
+    1476395010, 1610612741, 2030043136, 2080374785, 2097152000};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions.
+
+
+typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
+
+
+// Two register inputs select the plain opcode with Operand2_R.
+TEST_P(InstructionSelectorDPITest, Parameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// A constant right input selects Operand2_I. A constant left input is
+// handled by swapping the operands, hence reverse_arch_opcode with the
+// constant still ending up as input 1.
+TEST_P(InstructionSelectorDPITest, Immediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// A shift-by-register feeding either operand is folded into the DPI as a
+// single instruction with the Operand2_R_<shift>_R addressing mode (three
+// inputs: base, shiftee, shift amount).
+TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(
+        m.Parameter(0),
+        (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    // Shift on the left operand: operands are swapped, so the reverse
+    // opcode is expected.
+    m.Return((m.*dpi.constructor)(
+        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+        m.Parameter(2)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Same as above, but with an immediate shift amount: Operand2_R_<shift>_I,
+// with the immediate becoming input 2 of the folded instruction.
+TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.*dpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.*dpi.constructor)(
+          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+          m.Parameter(1)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+// Branching directly on a DPI result selects the flag-setting test form
+// (test_arch_opcode, e.g. And -> Tst) fused with the branch:
+// kFlags_branch and a kNotEqual ("result != 0") condition.
+TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+// As above, with an immediate operand on either side; both orders still
+// select the (commutative) test opcode with Operand2_I.
+TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
+             &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
+             &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Branch fused with a DPI whose operand is a shift-by-register: still one
+// instruction, using the test opcode and the Operand2_R_<shift>_R mode.
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(
+                 m.Parameter(0),
+                 (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+                 m.Parameter(2)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Branch fused with a DPI whose operand is a shift-by-immediate. The
+// instruction carries 5 inputs here: the two value inputs, the shift
+// immediate, and the two branch targets.
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      MLabel a, b;
+      m.Branch((m.*dpi.constructor)(m.Parameter(0),
+                                    (m.*shift.constructor)(
+                                        m.Parameter(1), m.Int32Constant(imm))),
+               &a, &b);
+      m.Bind(&a);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&b);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(5U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      MLabel a, b;
+      m.Branch((m.*dpi.constructor)(
+                   (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                   m.Parameter(1)),
+               &a, &b);
+      m.Bind(&a);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&b);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(5U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    }
+  }
+}
+
+
+// Comparing the DPI result against zero and branching inverts the
+// condition: "== 0" turns the fused test into a kEqual branch.
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+                         m.Int32Constant(0)),
+           &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+// "!= 0" folds away entirely, leaving the plain kNotEqual fused test.
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch(
+      m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+                       m.Int32Constant(0)),
+      &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+// Zero-comparison branch with an immediate DPI operand (either side):
+// fused test opcode, Operand2_I, condition kEqual.
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32Equal(
+                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32Equal(
+                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Same with "!= 0": the comparison folds away, condition stays kNotEqual.
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32NotEqual(
+                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32NotEqual(
+                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
+                        ::testing::ValuesIn(kDPIs));
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions with overflow.
+
+
+typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
+
+
+// Consuming only the overflow bit (projection 1) of an ODPI selects the
+// instruction with kFlags_set and a kOverflow materialized condition.
+TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// As above, with an immediate on either side; a constant left operand is
+// commuted, selecting the reverse opcode.
+TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Overflow projection of an ODPI with a folded shift-by-register operand.
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(
+               m.Parameter(0),
+               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    // NOTE(review): reuses Parameter(0) as the non-shifted operand where the
+    // analogous DPI test uses Parameter(2), leaving Parameter(2) unused.
+    // Harmless for selection coverage, but possibly a copy/paste slip.
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(
+               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+               m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Overflow projection of an ODPI with a folded shift-by-immediate operand.
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          1, (m.*odpi.constructor)(m.Parameter(0),
+                                   (m.*shift.constructor)(
+                                       m.Parameter(1), m.Int32Constant(imm)))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          1, (m.*odpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+                 m.Parameter(0))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+}
+
+
+// Consuming only the value (projection 0) of an ODPI: the instruction is
+// still selected, but no flags output is needed (kFlags_none).
+TEST_P(InstructionSelectorODPITest, ValWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+// Value projection with an immediate on either side; constant-left commutes
+// to the reverse opcode.
+TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+// Value projection of an ODPI with a folded shift-by-register operand.
+TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(
+               m.Parameter(0),
+               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    // NOTE(review): reuses Parameter(0) where the analogous DPI test uses
+    // Parameter(2) (which is unused here) -- likely a copy/paste slip, but
+    // harmless for selection coverage.
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(
+               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+               m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+// Value projection of an ODPI with a folded shift-by-immediate operand.
+TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          0, (m.*odpi.constructor)(m.Parameter(0),
+                                   (m.*shift.constructor)(
+                                       m.Parameter(1), m.Int32Constant(imm)))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          0, (m.*odpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+                 m.Parameter(0))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+    }
+  }
+}
+
+
+// Consuming both projections: the single instruction carries two outputs
+// (value + materialized overflow), with kFlags_set/kOverflow.
+TEST_P(InstructionSelectorODPITest, BothWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+  Stream s = m.Build();
+  ASSERT_LE(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// Both projections with an immediate on either side; constant-left commutes
+// to the reverse opcode.
+TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Both projections with a folded shift-by-register operand.
+TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(
+        m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(
+        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Both projections with a folded shift-by-immediate operand.
+TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      Node* n = (m.*odpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
+      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+      Stream s = m.Build();
+      ASSERT_LE(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(2U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      Node* n = (m.*odpi.constructor)(
+          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+          m.Parameter(1));
+      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+      Stream s = m.Build();
+      ASSERT_LE(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(2U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+}
+
+
+// Branching on the overflow projection fuses the ODPI with the branch:
+// kFlags_branch/kOverflow, with the branch targets as extra inputs (4 total)
+// and the value projection as the single output.
+TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Projection(1, n), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&b);
+  m.Return(m.Projection(0, n));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// Overflow-branch with an immediate on either side; constant-left commutes
+// to the reverse opcode.
+TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// Checks that comparing the overflow projection against zero inverts the
+// fused branch condition from kOverflow to kNotOverflow.
+TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Projection(0, n));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  // overflow-bit == 0 means "branch when no overflow".
+  EXPECT_EQ(kNotOverflow, s[0]->flags_condition());
+}
+
+
+// Checks that a "projection != 0" comparison keeps the fused branch
+// condition as plain kOverflow.
+TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Projection(0, n));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+// Instantiate the overflow-checking data-processing-instruction tests for
+// every entry in kODPIs.
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
+                        ::testing::ValuesIn(kODPIs));
+
+
+// -----------------------------------------------------------------------------
+// Shifts.
+
+
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
+
+
+// A plain shift of two registers selects a single mov with the shift folded
+// into the Operand2 register-shift addressing mode.
+TEST_P(InstructionSelectorShiftTest, Parameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// A shift by a constant in the legal range selects mov with an immediate
+// shift addressing mode.
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Word32Equal against a shifted value folds the shift into the cmp's
+// Operand2, on either side of the comparison.
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
+  const Shift shift = GetParam();
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Word32Equal(m.Parameter(0),
+                      (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
+                      m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Same as above, but the shift amount is an immediate; the immediate shift
+// mode is folded into cmp on either side of the comparison.
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+        m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        m.Parameter(0),
+        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Comparing a shift result against zero selects a flag-setting mov (movs)
+// that produces both the value and the flags (hence two outputs).
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Word32Equal(m.Int32Constant(0),
+                    (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+// As above, but with an immediate shift amount.
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        m.Int32Constant(0),
+        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Word32Not of a shifted value folds into mvn with the shift in Operand2.
+TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// As above, with an immediate shift amount.
+TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Not(
+        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// x & ~(y shift z) selects bic with the shift folded into Operand2.
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
+                                           m.Parameter(1), m.Parameter(2)))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// As above, with an immediate shift amount.
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Not((m.*shift.constructor)(
+                             m.Parameter(1), m.Int32Constant(imm)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Instantiate the shift tests for every entry in kShifts.
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShifts));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
+// Describes one load/store test case: the machine type, the expected
+// load/store opcodes, a predicate classifying the result register, and a
+// sample of immediate offsets valid for that access width.
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode ldr_opcode;
+  ArchOpcode str_opcode;
+  // Member-function pointer into Stream: IsInteger or IsDouble.
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  // Offsets within the encodable immediate range for this access width.
+  const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
+// Immediate ranges mirror the ARM encodings: +/-4095 for byte/word
+// (addr mode 2), +/-255 for halfword/signed byte loads (addr mode 3),
+// and +/-1020 (word-aligned) for VFP loads/stores.
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8,
+     kArmLdrsb,
+     kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint8,
+     kArmLdrb,
+     kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
+      -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
+      39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
+    {kMachInt16,
+     kArmLdrsh,
+     kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
+      -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
+      102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
+    {kMachUint16,
+     kArmLdrh,
+     kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
+      -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
+      114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
+    {kMachInt32,
+     kArmLdr,
+     kArmStr,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
+      -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
+      93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
+    {kMachFloat32,
+     kArmVldr32,
+     kArmVstr32,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
+      -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
+      24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
+    {kMachFloat64,
+     kArmVldr64,
+     kArmVstr64,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
+      -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
+      108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
+
+}  // namespace
+
+
+// Value-parameterized fixture: one instance per entry in kMemoryAccesses.
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+// A load with a register index uses the register-register offset mode.
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+}
+
+
+// A load with an encodable constant index uses the register-immediate
+// offset mode.
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+  }
+}
+
+
+// A store with a register index uses the register-register offset mode and
+// produces no value.
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+// A store with an encodable constant index uses the register-immediate
+// offset mode.
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+// Instantiate the memory-access tests for every entry in kMemoryAccesses.
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
+// Add combined with a multiply selects a single mla (multiply-accumulate),
+// regardless of operand order.
+TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Int32Mul(m.Parameter(1), m.Parameter(2)), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Without hardware division, Int32Div expands to int->double conversions,
+// a VFP divide, and a double->int truncation (4 instructions).
+TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(4U, s.size());
+  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+// With the SUDIV feature enabled, Int32Div selects a single sdiv.
+TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+}
+
+
+// Without hardware division, Int32Mod expands to the 4-instruction division
+// sequence followed by mul and sub (a - (a / b) * b).
+TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(6U, s.size());
+  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+  ASSERT_EQ(1U, s[4]->OutputCount());
+  ASSERT_EQ(2U, s[4]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+  ASSERT_EQ(1U, s[5]->OutputCount());
+  ASSERT_EQ(2U, s[5]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+// With SUDIV (but no MLS), Int32Mod selects sdiv + mul + sub.
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+// With both SUDIV and MLS, Int32Mod selects sdiv + mls (multiply-subtract).
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(MLS, SUDIV);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(3U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+// A plain multiply of two registers selects mul.
+TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// Multiplication by 2^k +/- 1 is strength-reduced to an add/rsb with a
+// left-shifted (LSL) second operand, regardless of operand order.
+TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
+  // x * (2^k + 1) -> x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 1, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // x * (2^k - 1) -> -x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 3, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // (2^k + 1) * x -> x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 1, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // (2^k - 1) * x -> -x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 3, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Without MLS, sub-of-mul stays as two instructions: mul feeding sub.
+TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmSub, s[1]->arch_opcode());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
+}
+
+
+// With the MLS feature, sub-of-mul fuses into a single mls.
+TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  Stream s = m.Build(MLS);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMls, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(3U, s[0]->InputCount());
+}
+
+
+// Without hardware division, unsigned division expands like the signed case
+// but with unsigned<->double conversions.
+TEST_F(InstructionSelectorTest, Int32UDivWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(4U, s.size());
+  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+// With SUDIV, unsigned division selects a single udiv.
+TEST_F(InstructionSelectorTest, Int32UDivWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+}
+
+
+// Without hardware division, unsigned modulus expands to the unsigned
+// division sequence followed by mul and sub.
+TEST_F(InstructionSelectorTest, Int32UModWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(6U, s.size());
+  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+  ASSERT_EQ(1U, s[4]->OutputCount());
+  ASSERT_EQ(2U, s[4]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+  ASSERT_EQ(1U, s[5]->OutputCount());
+  ASSERT_EQ(2U, s[5]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+// With SUDIV (but no MLS), unsigned modulus selects udiv + mul + sub.
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+// With both SUDIV and MLS, unsigned modulus selects udiv + mls.
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIVAndMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(MLS, SUDIV);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(3U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+// Masking with a low contiguous bitmask selects ubfx (extract from bit 0)
+// on ARMv7, regardless of operand order.
+TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, width, 1, 32) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Int32Constant(0xffffffffu >> (32 - width))));
+    Stream s = m.Build(ARMv7);
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+  }
+  TRACED_FORRANGE(int32_t, width, 1, 32) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                         m.Parameter(0)));
+    Stream s = m.Build(ARMv7);
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+  }
+}
+
+
+// Masking with an inverted contiguous bitfield selects bfc (bitfield clear)
+// on ARMv7; bfc modifies its destination in place, hence the same-as-input
+// output policy.
+TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(
+          m.Parameter(0),
+          m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+      ASSERT_EQ(1U, s[0]->OutputCount());
+      EXPECT_TRUE(
+          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(
+          m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
+                      m.Parameter(0)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+      ASSERT_EQ(1U, s[0]->OutputCount());
+      EXPECT_TRUE(
+          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+// Checks that (x & mask) >> lsb with a contiguous mask at bit lsb selects a
+// single ubfx on ARMv7, even when the mask has junk bits below lsb (which the
+// shift discards anyway).
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      // Use an unsigned shift: 1 << 31 on a signed int overflows (UB).
+      uint32_t max = 1u << lsb;
+      // NextInt() takes a signed bound, so clamp to kMaxInt.
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = rng()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      // Same as above with the mask on the left of the Word32And.
+      uint32_t max = 1u << lsb;
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = rng()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+// x & ~y selects bic with a plain register Operand2, in either operand order.
+TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// Word32Equal of two registers selects a flag-setting cmp with kEqual.
+TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+// Word32Equal against a non-zero immediate selects cmp with an immediate
+// Operand2, regardless of operand order (zero is covered by the tst test).
+TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    if (imm == 0) continue;
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    if (imm == 0) continue;
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// Comparison against zero selects tst with the register repeated as both
+// inputs (tst x, x sets Z iff x == 0), in either operand order.
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+// A plain bitwise-not selects mvn with a register Operand2.
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  m.Return(m.Word32Not(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+// (x >> lsb) & low-mask selects a single ubfx on ARMv7, regardless of
+// operand order of the Word32And.
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+                           m.Int32Constant(0xffffffffu >> (32 - width))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                           m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
new file mode 100644
index 0000000..ae93b27
--- /dev/null
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -0,0 +1,950 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds Arm-specific methods for generating InstructionOperands.
+class ArmOperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit ArmOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  // Uses |node| as an immediate operand if it can be encoded in the
+  // instruction selected by |opcode|, otherwise forces it into a register.
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  // Returns true if |node| is a 32-bit integer constant whose value fits
+  // the immediate encoding of the instruction selected by |opcode|.
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int32_t value = m.Value();
+    switch (ArchOpcodeField::decode(opcode)) {
+      // ~value is also accepted for these, presumably because the code
+      // generator can switch to the complemented instruction form
+      // (e.g. MOV/MVN, AND/BIC) -- TODO confirm against code generator.
+      case kArmAnd:
+      case kArmMov:
+      case kArmMvn:
+      case kArmBic:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(~value);
+
+      // -value is also accepted for these, presumably via the negated
+      // instruction form (e.g. ADD/SUB, CMP/CMN) -- TODO confirm.
+      case kArmAdd:
+      case kArmSub:
+      case kArmCmp:
+      case kArmCmn:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(-value);
+
+      case kArmTst:
+      case kArmTeq:
+      case kArmOrr:
+      case kArmEor:
+      case kArmRsb:
+        return ImmediateFitsAddrMode1Instruction(value);
+
+      // VFP load/store offsets: +/-1020, word-aligned.
+      case kArmVldr32:
+      case kArmVstr32:
+      case kArmVldr64:
+      case kArmVstr64:
+        return value >= -1020 && value <= 1020 && (value % 4) == 0;
+
+      // Word/byte load/store offsets: +/-4095.
+      case kArmLdrb:
+      case kArmLdrsb:
+      case kArmStrb:
+      case kArmLdr:
+      case kArmStr:
+      case kArmStoreWriteBarrier:
+        return value >= -4095 && value <= 4095;
+
+      // Halfword (and signed byte load) offsets: +/-255.
+      case kArmLdrh:
+      case kArmLdrsh:
+      case kArmStrh:
+        return value >= -255 && value <= 255;
+
+      // All remaining opcodes take no immediate operand.
+      case kArchCallCodeObject:
+      case kArchCallJSFunction:
+      case kArchJmp:
+      case kArchNop:
+      case kArchRet:
+      case kArchTruncateDoubleToI:
+      case kArmMul:
+      case kArmMla:
+      case kArmMls:
+      case kArmSdiv:
+      case kArmUdiv:
+      case kArmBfc:
+      case kArmUbfx:
+      case kArmVcmpF64:
+      case kArmVaddF64:
+      case kArmVsubF64:
+      case kArmVmulF64:
+      case kArmVmlaF64:
+      case kArmVmlsF64:
+      case kArmVdivF64:
+      case kArmVmodF64:
+      case kArmVnegF64:
+      case kArmVsqrtF64:
+      case kArmVcvtF64S32:
+      case kArmVcvtF64U32:
+      case kArmVcvtS32F64:
+      case kArmVcvtU32F64:
+      case kArmPush:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+ private:
+  // Delegates to the assembler's check for ARM data-processing
+  // (addressing mode 1) immediate encodability.
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
+  }
+};
+
+
+// Emits a three-register float64 instruction: result and both inputs in
+// registers.
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+// Tries to match |node| as a rotate-right that can be folded into an ARM
+// operand2. On success ORs the addressing mode into |*opcode_return| and
+// fills in the value/shift operands. The immediate form only encodes
+// rotations of 1-31; other shift amounts use the register form.
+// Marked "inline" for consistency with TryMatchASR/LSL/LSR below.
+static inline bool TryMatchROR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Ror) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 31)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+// Tries to match |node| as an arithmetic-shift-right foldable into an ARM
+// operand2. Immediate ASR encodes shift amounts 1-32; otherwise the
+// register form is used.
+static inline bool TryMatchASR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Sar) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 32)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+// Tries to match |node| as a logical-shift-left foldable into an ARM
+// operand2. Immediate LSL encodes shift amounts 0-31; otherwise the
+// register form is used.
+static inline bool TryMatchLSL(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Shl) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(0, 31)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+// Tries to match |node| as a logical-shift-right foldable into an ARM
+// operand2. Immediate LSR encodes shift amounts 1-32; otherwise the
+// register form is used.
+static inline bool TryMatchLSR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Shr) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 32)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+// Tries all four shift matchers in turn; returns true if |node| can be
+// folded into an ARM shifted operand2.
+static inline bool TryMatchShift(InstructionSelector* selector,
+                                 InstructionCode* opcode_return, Node* node,
+                                 InstructionOperand** value_return,
+                                 InstructionOperand** shift_return) {
+  return (
+      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
+}
+
+
+// Tries to use |node| as an operand2: first as an encodable immediate
+// (one input operand), then as a shifted register (two input operands).
+// On success writes the operand(s) into |inputs| and the count into
+// |*input_count_return|.
+static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
+                                            InstructionCode* opcode_return,
+                                            Node* node,
+                                            size_t* input_count_return,
+                                            InstructionOperand** inputs) {
+  ArmOperandGenerator g(selector);
+  if (g.CanBeImmediate(node, *opcode_return)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
+    inputs[0] = g.UseImmediate(node);
+    *input_count_return = 1;
+    return true;
+  }
+  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
+    *input_count_return = 2;
+    return true;
+  }
+  return false;
+}
+
+
+// Shared routine for binary operations. Tries to fold the right operand
+// into operand2; failing that, tries the left operand with
+// |reverse_opcode| (for non-commutative ops, e.g. SUB vs RSB); otherwise
+// uses plain register-register form. Branch continuations append the
+// true/false block labels as extra inputs.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode,
+                       FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  // Worst case: value + shift operand2 (2) + first input (1) + 2 labels.
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
+                                      m.left().node(), &input_count,
+                                      &inputs[1])) {
+    inputs[0] = g.UseRegister(m.right().node());
+    opcode = reverse_opcode;
+    input_count++;
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    // Second output materializes the flag result as a value.
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Convenience overload for binops without a flags continuation.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  // Pick the load opcode from the representation; sub-word loads choose
+  // zero- vs sign-extension based on the type.
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVldr32;
+      break;
+    case kRepFloat64:
+      opcode = kArmVldr64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kArmLdr;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  // Use [base, #imm] when the index constant fits the opcode's offset
+  // range, otherwise [base, index].
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    // The write barrier stub expects its operands in fixed registers
+    // r4/r5/r6; r5 and r6 are also clobbered as temps.
+    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
+         g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  // Pick the store opcode from the representation.
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVstr32;
+      break;
+    case kRepFloat64:
+      opcode = kArmVstr64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kArmStrb;
+      break;
+    case kRepWord16:
+      opcode = kArmStrh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kArmStr;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  // Use [base, #imm] when the index constant fits the opcode's offset
+  // range, otherwise [base, index].
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+  }
+}
+
+
+// Emits BIC(left, right), i.e. left & ~right, folding a shift on |right|
+// into the operand2 when possible.
+static inline void EmitBic(InstructionSelector* selector, Node* node,
+                           Node* left, Node* right) {
+  ArmOperandGenerator g(selector);
+  InstructionCode opcode = kArmBic;
+  InstructionOperand* value_operand;
+  InstructionOperand* shift_operand;
+  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+                   value_operand, shift_operand);
+    return;
+  }
+  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+                 g.DefineAsRegister(node), g.UseRegister(left),
+                 g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // (x ^ -1) & y  =>  BIC(y, x), on either side of the AND.
+  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(-1)) {
+      EmitBic(this, node, m.right().node(), mleft.left().node());
+      return;
+    }
+  }
+  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    if (mright.right().Is(-1)) {
+      EmitBic(this, node, m.left().node(), mright.left().node());
+      return;
+    }
+  }
+  if (IsSupported(ARMv7) && m.right().HasValue()) {
+    uint32_t value = m.right().Value();
+    uint32_t width = base::bits::CountPopulation32(value);
+    uint32_t msb = base::bits::CountLeadingZeros32(value);
+    // Mask of |width| contiguous low bits: AND (optionally of a shift)
+    // becomes a UBFX bitfield extract.
+    if (width != 0 && msb + width == 32) {
+      DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
+      if (m.left().IsWord32Shr()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().IsInRange(0, 31)) {
+          // (x >> lsb) & mask  =>  UBFX(x, lsb, width).
+          Emit(kArmUbfx, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()),
+               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
+          return;
+        }
+      }
+      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(0), g.TempImmediate(width));
+      return;
+    }
+    // Try to interpret this AND as BFC.
+    // Mask with one contiguous run of zero bits: clear that bitfield in
+    // place (hence DefineSameAsFirst).
+    width = 32 - width;
+    msb = base::bits::CountLeadingZeros32(~value);
+    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+    if (msb + width + lsb == 32) {
+      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(lsb), g.TempImmediate(width));
+      return;
+    }
+  }
+  VisitBinop(this, node, kArmAnd, kArmAnd);
+}
+
+
+// OR is commutative, so the same opcode serves as its own reverse.
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kArmOrr, kArmOrr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // x ^ -1  =>  MVN(x), folding a shift on x into the operand2 if possible.
+  if (m.right().Is(-1)) {
+    InstructionCode opcode = kArmMvn;
+    InstructionOperand* value_operand;
+    InstructionOperand* shift_operand;
+    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
+                      &shift_operand)) {
+      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
+      return;
+    }
+    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmEor, kArmEor);
+}
+
+
+// Shared routine for shift operations: emits a MOV whose operand2 carries
+// the shift, as matched by |try_match_shift|. The CHECK holds because this
+// is only called for the corresponding Word32* shift node.
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              TryMatchShift try_match_shift,
+                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  InstructionCode opcode = kArmMov;
+  InstructionOperand* inputs[4];
+  size_t input_count = 2;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Convenience overload for shifts without a flags continuation.
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              TryMatchShift try_match_shift) {
+  FlagsContinuation cont;
+  VisitShift(selector, node, try_match_shift, &cont);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitShift(this, node, TryMatchLSL);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // (x & mask) >> lsb  =>  UBFX(x, lsb, width) on ARMv7 when the masked
+  // bits shifted down form one contiguous field.
+  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
+      m.right().IsInRange(0, 31)) {
+    int32_t lsb = m.right().Value();
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      // Drop mask bits below |lsb|; they are shifted out anyway.
+      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t width = base::bits::CountPopulation32(value);
+      uint32_t msb = base::bits::CountLeadingZeros32(value);
+      if (msb + width + lsb == 32) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
+        Emit(kArmUbfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(width));
+        return;
+      }
+    }
+  }
+  VisitShift(this, node, TryMatchLSR);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitShift(this, node, TryMatchASR);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitShift(this, node, TryMatchROR);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // (a * b) + c  =>  MLA(a, b, c), for a multiply on either side.
+  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmAdd, kArmAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // c - (a * b)  =>  MLS(a, b, c), only on CPUs with the MLS instruction.
+  if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+      CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  // SUB is not commutative: RSB is the reversed form.
+  VisitBinop(this, node, kArmSub, kArmRsb);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    // x * (2^k + 1)  =>  ADD(x, x, LSL #k).
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      return;
+    }
+    // x * (2^k - 1)  =>  RSB(x, x, LSL #k); kMaxInt guard avoids overflow
+    // in value + 1.
+    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
+      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      return;
+    }
+  }
+  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+// Emits an integer division. With hardware SDIV/UDIV support this is a
+// single instruction; otherwise the operands are converted to float64,
+// divided with VDIV, and the quotient converted back.
+static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+                    InstructionOperand* result_operand,
+                    InstructionOperand* left_operand,
+                    InstructionOperand* right_operand) {
+  ArmOperandGenerator g(selector);
+  if (selector->IsSupported(SUDIV)) {
+    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+    return;
+  }
+  InstructionOperand* left_double_operand = g.TempDoubleRegister();
+  InstructionOperand* right_double_operand = g.TempDoubleRegister();
+  InstructionOperand* result_double_operand = g.TempDoubleRegister();
+  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+                 right_double_operand);
+  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
+
+
+// Shared routine for the signed/unsigned division visitors; the
+// signedness is carried entirely by the three opcodes.
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+          g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+// Shared routine for modulus: computes the quotient via EmitDiv, then the
+// remainder as left - quotient * right, using MLS when available and a
+// MUL + SUB sequence otherwise.
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* div_operand = g.TempRegister();
+  InstructionOperand* result_operand = g.DefineAsRegister(node);
+  InstructionOperand* left_operand = g.UseRegister(m.left().node());
+  InstructionOperand* right_operand = g.UseRegister(m.right().node());
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+          left_operand, right_operand);
+  if (selector->IsSupported(MLS)) {
+    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+                   left_operand);
+    return;
+  }
+  InstructionOperand* mul_operand = g.TempRegister();
+  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+// Int <-> float64 conversions each map to a single VCVT variant.
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  ArmOperandGenerator g(this);
+  // Use the float64 matcher here (the original used Int32BinopMatcher,
+  // which happened to work since only the node accessors are touched, but
+  // was inconsistent with VisitFloat64Mul/VisitFloat64Compare).
+  Float64BinopMatcher m(node);
+  // (a * b) + c  =>  VMLA, for a multiply on either side. VMLA
+  // accumulates into its destination, hence DefineSameAsFirst with the
+  // addend as the first input.
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    Float64BinopMatcher mleft(m.left().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()));
+    return;
+  }
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  // Use the float64 matcher here (the original used Int32BinopMatcher,
+  // which happened to work since only the node accessors are touched, but
+  // was inconsistent with VisitFloat64Mul/VisitFloat64Compare).
+  Float64BinopMatcher m(node);
+  // a - (b * c)  =>  VMLS, which multiply-subtracts from its destination,
+  // hence DefineSameAsFirst with the minuend as the first input.
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVsubF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  // x * -1.0  =>  VNEG(x).
+  if (m.right().Is(-1.0)) {
+    Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitRRRFloat64(this, kArmVmulF64, node);
+  }
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArmVdivF64, node);
+}
+
+
+// Float64 modulus is a runtime call with a C-like calling convention:
+// arguments fixed in d0/d1, result in d0.
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  ArmOperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false);
+
+  // TODO(dcarney): might be possible to use claim/poke instead
+  // Push any stack arguments. Iterated in reverse so the last pushed
+  // argument ends up lowest on the stack.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    Emit(kArmPush, NULL, g.UseRegister(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+
+// Overflow-checked add/sub reuse the plain binop emission; the overflow
+// check itself is carried in the flags continuation.
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kArmAdd, kArmAdd, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kArmSub, kArmRsb, cont);
+}
+
+
+// Shared routine for multiple compare operations. Like VisitBinop, tries
+// to fold an immediate or shift into the operand2; when the left operand
+// folds instead, the continuation's condition is commuted unless the
+// comparison itself is commutative.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+  InstructionOperand* outputs[1];
+  size_t output_count = 0;
+
+  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
+                                      &input_count, &inputs[1])) {
+    if (!commutative) cont->Commute();
+    inputs[0] = g.UseRegister(m.right().node());
+    input_count++;
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else {
+    DCHECK(cont->IsSet());
+    // Compares produce no value of their own; the only output is the
+    // materialized flag result.
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Tests a value against zero. Known node kinds are combined with the
+// flag-setting form of the corresponding instruction (e.g. add -> CMN,
+// sub -> CMP, and -> TST); everything else falls back to TST of the value
+// against itself.
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+      return VisitWordCompare(this, node, kArmCmn, cont, true);
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kArmCmp, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArmTst, cont, true);
+    case IrOpcode::kWord32Or:
+      return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
+    case IrOpcode::kWord32Xor:
+      return VisitWordCompare(this, node, kArmTeq, cont, true);
+    case IrOpcode::kWord32Sar:
+      return VisitShift(this, node, TryMatchASR, cont);
+    case IrOpcode::kWord32Shl:
+      return VisitShift(this, node, TryMatchLSL, cont);
+    case IrOpcode::kWord32Shr:
+      return VisitShift(this, node, TryMatchLSR, cont);
+    case IrOpcode::kWord32Ror:
+      return VisitShift(this, node, TryMatchROR, cont);
+    default:
+      break;
+  }
+
+  // Fallback: TST node, node sets the flags without a destination.
+  ArmOperandGenerator g(this);
+  InstructionCode opcode =
+      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+  if (cont->IsBranch()) {
+    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
+         g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
+         g.UseRegister(node));
+  }
+}
+
+
+// Integer comparison: CMP is non-commutative, so the continuation is
+// commuted inside VisitWordCompare if the operands are swapped.
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArmCmp, cont, false);
+}
+
+
+// Float64 comparison via VCMP; both operands are always in registers.
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (cont->IsBranch()) {
+    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
+         g.UseRegister(m.right().node()), g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+         g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+  }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
new file mode 100644
index 0000000..6673a47
--- /dev/null
+++ b/src/compiler/arm/linkage-arm.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM register assignments consumed by the generic LinkageHelper:
+// return/argument registers, callee-saved set, and C-call parameter
+// registers (r0-r3 per the ARM procedure call convention used here).
+struct ArmLinkageHelperTraits {
+  static Register ReturnValueReg() { return r0; }
+  static Register ReturnValue2Reg() { return r1; }
+  static Register JSCallFunctionReg() { return r1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return r1; }
+  static Register RuntimeCallArgCountReg() { return r0; }
+  static RegList CCalleeSaveRegisters() {
+    return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
+           r10.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {r0, r1, r2, r3};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+};
+
+
+// All descriptor construction is delegated to the platform-independent
+// LinkageHelper, parameterized with the ARM register traits above.
+typedef LinkageHelper<ArmLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8