Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/compiler/ppc/OWNERS b/src/compiler/ppc/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/compiler/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
new file mode 100644
index 0000000..154cd64
--- /dev/null
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -0,0 +1,1868 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
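+// As in the other V8 code generators, `__` abbreviates `masm()->` so that
+// the emit sequences below read like assembly listings.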
+#define __ masm()->
+
+
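+// r11 serves as an extra scratch register throughout this file; it is
+// assumed to be reserved for this purpose by the PPC port (i.e. not handed
+// out by the register allocator), matching the macro-assembler convention.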
+#define kScratchReg r11
+
+
+// Adds PPC-specific methods to convert InstructionOperands.
+class PPCOperandConverter final : public InstructionOperandConverter {
+ public:
+  PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  size_t OutputCount() { return instr_->OutputCount(); }
+
+  RCBit OutputRCBit() const {
+    switch (instr_->flags_mode()) {
+      case kFlags_branch:
+      case kFlags_set:
+        return SetRC;
+      case kFlags_none:
+        return LeaveRC;
+    }
+    UNREACHABLE();
+    return LeaveRC;
+  }
+
+  bool CompareLogical() const {
+    switch (instr_->flags_condition()) {
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        return true;
+      default:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+  Operand InputImmediate(size_t index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+#if V8_TARGET_ARCH_PPC64
+        return Operand(constant.ToInt64());
+#endif
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+      case Constant::kRpoNumber:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
+    const size_t index = *first_index;
+    *mode = AddressingModeField::decode(instr_->opcode());
+    switch (*mode) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+    return MemoryOperand(mode, &first_index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK_NOT_NULL(op);
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    FrameOffset offset = frame_access_state()->GetFrameOffset(
+        AllocatedOperand::cast(op)->index());
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
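+// Out-of-line stubs for the "checked" memory operations further below: a
+// checked load that fails its bounds check materializes a quiet NaN (float
+// loads) or zero (integer loads) instead of performing the access, and a
+// failed checked store is simply skipped.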
+class OutOfLineLoadNAN32 final : public OutOfLineCode {
+ public:
+  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+                         kScratchReg);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadNAN64 final : public OutOfLineCode {
+ public:
+  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+                         kScratchReg);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadZero final : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final { __ li(result_, Operand::Zero()); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(offset),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode) {}
+
+  void Generate() final {
+    if (mode_ > RecordWriteMode::kValueIsPointer) {
+      __ JumpIfSmi(value_, exit());
+    }
+    if (mode_ > RecordWriteMode::kValueIsMap) {
+      __ CheckPageFlag(value_, scratch0_,
+                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                       exit());
+    }
+    SaveFPRegsMode const save_fp_mode =
+        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+    // TODO(turbofan): Once we get frame elision working, we need to save
+    // and restore lr properly here if the frame was elided.
+    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+                         EMIT_REMEMBERED_SET, save_fp_mode);
+    __ add(scratch1_, object_, offset_);
+    __ CallStub(&stub);
+  }
+
+ private:
+  Register const object_;
+  Register const offset_;
+  Register const value_;
+  Register const scratch0_;
+  Register const scratch1_;
+  RecordWriteMode const mode_;
+};
+
+
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
+  switch (condition) {
+    case kEqual:
+      return eq;
+    case kNotEqual:
+      return ne;
+    case kSignedLessThan:
+    case kUnsignedLessThan:
+      return lt;
+    case kSignedGreaterThanOrEqual:
+    case kUnsignedGreaterThanOrEqual:
+      return ge;
+    case kSignedLessThanOrEqual:
+    case kUnsignedLessThanOrEqual:
+      return le;
+    case kSignedGreaterThan:
+    case kUnsignedGreaterThan:
+      return gt;
+    case kOverflow:
+      // Overflow checked for add/sub only.
+      switch (op) {
+#if V8_TARGET_ARCH_PPC64
+        case kPPC_Add:
+        case kPPC_Sub:
+          return lt;
+#endif
+        case kPPC_AddWithOverflow32:
+        case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+          return ne;
+#else
+          return lt;
+#endif
+        default:
+          break;
+      }
+      break;
+    case kNotOverflow:
+      switch (op) {
+#if V8_TARGET_ARCH_PPC64
+        case kPPC_Add:
+        case kPPC_Sub:
+          return ge;
+#endif
+        case kPPC_AddWithOverflow32:
+        case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+          return eq;
+#else
+          return ge;
+#endif
+        default:
+          break;
+      }
+      break;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kNoCondition;
+}
+
+}  // namespace
+
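+// The ASSEMBLE_* macros below expand inside AssembleArchInstruction, where
+// `instr` is the current instruction and `i` is its PPCOperandConverter.
+// The *_RC variants thread i.OutputRCBit() through so that instructions
+// fused with a branch or flag materialization emit the CR0-updating ("dot")
+// form of the PPC instruction.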
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr)                            \
+  do {                                                               \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                 i.OutputRCBit());                                   \
+  } while (0)
+
+
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr)                           \
+  do {                                                               \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                 i.InputDoubleRegister(1), i.OutputRCBit());         \
+  } while (0)
+
+
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1));                    \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputImmediate(1));                   \
+    }                                                          \
+  } while (0)
+
+
+#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1), i.OutputRCBit());   \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputImmediate(1), i.OutputRCBit());  \
+    }                                                          \
+  } while (0)
+
+
+#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1), i.OutputRCBit());   \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputInt32(1), i.OutputRCBit());      \
+    }                                                          \
+  } while (0)
+
+
+#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
+  do {                                                                  \
+    if (HasRegisterInput(instr, 1)) {                                   \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputRegister(1), kScratchReg, r0);   \
+    } else {                                                            \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputInt32(1), kScratchReg, r0);      \
+    }                                                                   \
+  } while (0)
+
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
+  do {                                                                  \
+    if (HasRegisterInput(instr, 1)) {                                   \
+      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputRegister(1), kScratchReg, r0);   \
+    } else {                                                            \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                -i.InputInt32(1), kScratchReg, r0);     \
+    }                                                                   \
+  } while (0)
+
+
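+// On PPC64, 32-bit values are held sign-extended in 64-bit registers, so a
+// 32-bit add/sub with overflow check is emitted as the plain 64-bit
+// operation followed by TestIfInt32, which checks whether the result still
+// fits in 32 bits. This is why FlagsConditionToCondition above maps
+// kOverflow to ne (and kNotOverflow to eq) for the *WithOverflow32 opcodes
+// on PPC64.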
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_ADD_WITH_OVERFLOW32()           \
+  do {                                           \
+    ASSEMBLE_BINOP(add, addi);                   \
+    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+  } while (0)
+
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32()           \
+  do {                                           \
+    ASSEMBLE_BINOP(sub, subi);                   \
+    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+  } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
+#endif
+
+
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
+  do {                                                                 \
+    const CRegister cr = cr0;                                          \
+    if (HasRegisterInput(instr, 1)) {                                  \
+      if (i.CompareLogical()) {                                        \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
+      } else {                                                         \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
+      }                                                                \
+    } else {                                                           \
+      if (i.CompareLogical()) {                                        \
+        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+      } else {                                                         \
+        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
+      }                                                                \
+    }                                                                  \
+    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
+  } while (0)
+
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
+  do {                                                                    \
+    const CRegister cr = cr0;                                             \
+    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
+    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
+  } while (0)
+
+
+#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
+  do {                                                               \
+    const Register scratch = kScratchReg;                            \
+    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
+    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
+    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
+           i.OutputRCBit());                                         \
+  } while (0)
+
+
+#define ASSEMBLE_FLOAT_MODULO()                                               \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
+                            i.InputDoubleRegister(1));                        \
+    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+                     0, 2);                                                   \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
+  } while (0)
+
+
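+// Float min/max via fsel: scratch_reg holds input0 - input1, and fsel
+// selects the first of the two remaining operands when scratch_reg >= 0.0.
+// If either input is NaN the difference is NaN, which fails the >= 0.0
+// test, so NaN handling here differs from a strict IEEE-754 min/max.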
+#define ASSEMBLE_FLOAT_MAX(scratch_reg)                                       \
+  do {                                                                        \
+    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0),  \
+            i.InputDoubleRegister(1));                                        \
+  } while (0)
+
+
+#define ASSEMBLE_FLOAT_MIN(scratch_reg)                                       \
+  do {                                                                        \
+    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1),  \
+            i.InputDoubleRegister(0));                                        \
+  } while (0)
+
+
+#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
+  do {                                                \
+    DoubleRegister result = i.OutputDoubleRegister(); \
+    AddressingMode mode = kMode_None;                 \
+    MemOperand operand = i.MemoryOperand(&mode);      \
+    if (mode == kMode_MRI) {                          \
+      __ asm_instr(result, operand);                  \
+    } else {                                          \
+      __ asm_instrx(result, operand);                 \
+    }                                                 \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
+  } while (0)
+
+
+#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
+  do {                                               \
+    Register result = i.OutputRegister();            \
+    AddressingMode mode = kMode_None;                \
+    MemOperand operand = i.MemoryOperand(&mode);     \
+    if (mode == kMode_MRI) {                         \
+      __ asm_instr(result, operand);                 \
+    } else {                                         \
+      __ asm_instrx(result, operand);                \
+    }                                                \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
+  } while (0)
+
+
+#define ASSEMBLE_STORE_FLOAT32()                         \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    __ frsp(kScratchDoubleReg, value);                   \
+    if (mode == kMode_MRI) {                             \
+      __ stfs(kScratchDoubleReg, operand);               \
+    } else {                                             \
+      __ stfsx(kScratchDoubleReg, operand);              \
+    }                                                    \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
+  } while (0)
+
+
+#define ASSEMBLE_STORE_DOUBLE()                          \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    if (mode == kMode_MRI) {                             \
+      __ stfd(value, operand);                           \
+    } else {                                             \
+      __ stfdx(value, operand);                          \
+    }                                                    \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
+  } while (0)
+
+
+#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    Register value = i.InputRegister(index);             \
+    if (mode == kMode_MRI) {                             \
+      __ asm_instr(value, operand);                      \
+    } else {                                             \
+      __ asm_instrx(value, operand);                     \
+    }                                                    \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
+  } while (0)
+
+
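+// The checked load/store macros share one shape: the index register
+// (operand.rb()) is sign-extended, compared (unsigned) against the length
+// operand, and when index >= length control branches to out-of-line
+// recovery code for loads, or past the access for stores.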
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
+  do {                                                             \
+    DoubleRegister result = i.OutputDoubleRegister();              \
+    size_t index = 0;                                              \
+    AddressingMode mode = kMode_None;                              \
+    MemOperand operand = i.MemoryOperand(&mode, index);            \
+    DCHECK_EQ(kMode_MRR, mode);                                    \
+    Register offset = operand.rb();                                \
+    __ extsw(offset, offset);                                      \
+    if (HasRegisterInput(instr, 2)) {                              \
+      __ cmplw(offset, i.InputRegister(2));                        \
+    } else {                                                       \
+      __ cmplwi(offset, i.InputImmediate(2));                      \
+    }                                                              \
+    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+    __ bge(ool->entry());                                          \
+    if (mode == kMode_MRI) {                                       \
+      __ asm_instr(result, operand);                               \
+    } else {                                                       \
+      __ asm_instrx(result, operand);                              \
+    }                                                              \
+    __ bind(ool->exit());                                          \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
+  } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
+  do {                                                       \
+    Register result = i.OutputRegister();                    \
+    size_t index = 0;                                        \
+    AddressingMode mode = kMode_None;                        \
+    MemOperand operand = i.MemoryOperand(&mode, index);      \
+    DCHECK_EQ(kMode_MRR, mode);                              \
+    Register offset = operand.rb();                          \
+    __ extsw(offset, offset);                                \
+    if (HasRegisterInput(instr, 2)) {                        \
+      __ cmplw(offset, i.InputRegister(2));                  \
+    } else {                                                 \
+      __ cmplwi(offset, i.InputImmediate(2));                \
+    }                                                        \
+    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+    __ bge(ool->entry());                                    \
+    if (mode == kMode_MRI) {                                 \
+      __ asm_instr(result, operand);                         \
+    } else {                                                 \
+      __ asm_instrx(result, operand);                        \
+    }                                                        \
+    __ bind(ool->exit());                                    \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
+  } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    DCHECK_EQ(kMode_MRR, mode);                         \
+    Register offset = operand.rb();                     \
+    __ extsw(offset, offset);                           \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ cmplw(offset, i.InputRegister(2));             \
+    } else {                                            \
+      __ cmplwi(offset, i.InputImmediate(2));           \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ frsp(kScratchDoubleReg, value);                  \
+    if (mode == kMode_MRI) {                            \
+      __ stfs(kScratchDoubleReg, operand);              \
+    } else {                                            \
+      __ stfsx(kScratchDoubleReg, operand);             \
+    }                                                   \
+    __ bind(&done);                                     \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
+  } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    DCHECK_EQ(kMode_MRR, mode);                         \
+    Register offset = operand.rb();                     \
+    __ extsw(offset, offset);                           \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ cmplw(offset, i.InputRegister(2));             \
+    } else {                                            \
+      __ cmplwi(offset, i.InputImmediate(2));           \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    if (mode == kMode_MRI) {                            \
+      __ stfd(value, operand);                          \
+    } else {                                            \
+      __ stfdx(value, operand);                         \
+    }                                                   \
+    __ bind(&done);                                     \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
+  } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
+  do {                                                        \
+    Label done;                                               \
+    size_t index = 0;                                         \
+    AddressingMode mode = kMode_None;                         \
+    MemOperand operand = i.MemoryOperand(&mode, index);       \
+    DCHECK_EQ(kMode_MRR, mode);                               \
+    Register offset = operand.rb();                           \
+    __ extsw(offset, offset);                                 \
+    if (HasRegisterInput(instr, 2)) {                         \
+      __ cmplw(offset, i.InputRegister(2));                   \
+    } else {                                                  \
+      __ cmplwi(offset, i.InputImmediate(2));                 \
+    }                                                         \
+    __ bge(&done);                                            \
+    Register value = i.InputRegister(3);                      \
+    if (mode == kMode_MRI) {                                  \
+      __ asm_instr(value, operand);                           \
+    } else {                                                  \
+      __ asm_instrx(value, operand);                          \
+    }                                                         \
+    __ bind(&done);                                           \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
+  } while (0)
+
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+  if (sp_slot_delta > 0) {
+    __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+  }
+  frame_access_state()->SetFrameAccessToDefault();
+}
+
+
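+// Restores the caller's frame state (frame pointer, return address and, if
+// enabled, the embedded constant pool pointer) so that the tail-called code
+// sees the same stack layout as if it had been called directly.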
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+  if (sp_slot_delta < 0) {
+    __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+  }
+  if (frame()->needs_frame()) {
+    if (FLAG_enable_embedded_constant_pool) {
+      __ LoadP(kConstantPoolRegister,
+               MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+    }
+    __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ mtlr(r0);
+  }
+  frame_access_state()->SetFrameAccessToSP();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  PPCOperandConverter i(this, instr);
+  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+  switch (opcode) {
+    case kArchCallCodeObject: {
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      EnsureSpaceForLazyDeopt();
+      if (HasRegisterInput(instr, 0)) {
+        __ addi(ip, i.InputRegister(0),
+                Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Call(ip);
+      } else {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      }
+      RecordCallPosition(instr);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchTailCallCodeObject: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      if (HasRegisterInput(instr, 0)) {
+        __ addi(ip, i.InputRegister(0),
+                Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Jump(ip);
+      } else {
+        // We cannot use the constant pool to load the target since
+        // we've already restored the caller's frame.
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      }
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchCallJSFunction: {
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ LoadP(kScratchReg,
+                 FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+      RecordCallPosition(instr);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchTailCallJSFunction: {
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ LoadP(kScratchReg,
+                 FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Jump(ip);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchLazyBailout: {
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      EnsureSpaceForLazyDeopt();
+      RecordCallPosition(instr);
+      break;
+    }
+    case kArchPrepareCallCFunction: {
+      int const num_parameters = MiscField::decode(instr->opcode());
+      __ PrepareCallCFunction(num_parameters, kScratchReg);
+      // Frame alignment requires using FP-relative frame addressing.
+      frame_access_state()->SetFrameAccessToFP();
+      break;
+    }
+    case kArchPrepareTailCall:
+      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      break;
+    case kArchCallCFunction: {
+      int const num_parameters = MiscField::decode(instr->opcode());
+      if (instr->InputAt(0)->IsImmediate()) {
+        ExternalReference ref = i.InputExternalReference(0);
+        __ CallCFunction(ref, num_parameters);
+      } else {
+        Register func = i.InputRegister(0);
+        __ CallCFunction(func, num_parameters);
+      }
+      frame_access_state()->SetFrameAccessToDefault();
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchJmp:
+      AssembleArchJump(i.InputRpo(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchLookupSwitch:
+      AssembleArchLookupSwitch(instr);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchTableSwitch:
+      AssembleArchTableSwitch(instr);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchNop:
+    case kArchThrowTerminator:
+      // don't emit code for nops.
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchDeoptimize: {
+      int deopt_state_id =
+          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+      Deoptimizer::BailoutType bailout_type =
+          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      break;
+    }
+    case kArchRet:
+      AssembleReturn();
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchStackPointer:
+      __ mr(i.OutputRegister(), sp);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchFramePointer:
+      __ mr(i.OutputRegister(), fp);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kArchTruncateDoubleToI:
+      // TODO(mbrandy): move slow call to stub out of line.
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
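+    // Write barrier: the store itself is emitted inline; the page-flag
+    // check on the object then skips the out-of-line RecordWrite stub call
+    // in the common case where no incremental/generational bookkeeping is
+    // needed (the out-of-line path additionally filters Smi values and
+    // uninteresting value pages).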
+    case kArchStoreWithWriteBarrier: {
+      RecordWriteMode mode =
+          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      Register object = i.InputRegister(0);
+      Register offset = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      Register scratch0 = i.TempRegister(0);
+      Register scratch1 = i.TempRegister(1);
+      auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                   scratch0, scratch1, mode);
+      __ StorePX(value, MemOperand(object, offset));
+      __ CheckPageFlag(object, scratch0,
+                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+                       ool->entry());
+      __ bind(ool->exit());
+      break;
+    }
+    case kPPC_And:
+      if (HasRegisterInput(instr, 1)) {
+        __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                i.OutputRCBit());
+      } else {
+        __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+      }
+      break;
+    case kPPC_AndComplement:
+      __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+              i.OutputRCBit());
+      break;
+    case kPPC_Or:
+      if (HasRegisterInput(instr, 1)) {
+        __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.OutputRCBit());
+      } else {
+        __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+        DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      }
+      break;
+    case kPPC_OrComplement:
+      __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.OutputRCBit());
+      break;
+    case kPPC_Xor:
+      if (HasRegisterInput(instr, 1)) {
+        __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                i.OutputRCBit());
+      } else {
+        __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+        DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      }
+      break;
+    case kPPC_ShiftLeft32:
+      ASSEMBLE_BINOP_RC(slw, slwi);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_ShiftLeft64:
+      ASSEMBLE_BINOP_RC(sld, sldi);
+      break;
+#endif
+    case kPPC_ShiftRight32:
+      ASSEMBLE_BINOP_RC(srw, srwi);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_ShiftRight64:
+      ASSEMBLE_BINOP_RC(srd, srdi);
+      break;
+#endif
+    case kPPC_ShiftRightAlg32:
+      ASSEMBLE_BINOP_INT_RC(sraw, srawi);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_ShiftRightAlg64:
+      ASSEMBLE_BINOP_INT_RC(srad, sradi);
+      break;
+#endif
+    case kPPC_RotRight32:
+      if (HasRegisterInput(instr, 1)) {
+        __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
+        __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+                 i.OutputRCBit());
+      } else {
+        int sh = i.InputInt32(1);
+        __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+      }
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_RotRight64:
+      if (HasRegisterInput(instr, 1)) {
+        __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
+        __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+                 i.OutputRCBit());
+      } else {
+        int sh = i.InputInt32(1);
+        __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+      }
+      break;
+#endif
+    case kPPC_Not:
+      __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
+      break;
+    case kPPC_RotLeftAndMask32:
+      __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+                31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_RotLeftAndClear64:
+      __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+               63 - i.InputInt32(2), i.OutputRCBit());
+      break;
+    case kPPC_RotLeftAndClearLeft64:
+      __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+                63 - i.InputInt32(2), i.OutputRCBit());
+      break;
+    case kPPC_RotLeftAndClearRight64:
+      __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+                63 - i.InputInt32(2), i.OutputRCBit());
+      break;
+#endif
+    case kPPC_Add:
+#if V8_TARGET_ARCH_PPC64
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_ADD_WITH_OVERFLOW();
+      } else {
+#endif
+        if (HasRegisterInput(instr, 1)) {
+          __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                 LeaveOE, i.OutputRCBit());
+        } else {
+          __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+          DCHECK_EQ(LeaveRC, i.OutputRCBit());
+        }
+#if V8_TARGET_ARCH_PPC64
+      }
+#endif
+      break;
+    case kPPC_AddWithOverflow32:
+      ASSEMBLE_ADD_WITH_OVERFLOW32();
+      break;
+    case kPPC_AddDouble:
+      ASSEMBLE_FLOAT_BINOP_RC(fadd);
+      break;
+    case kPPC_Sub:
+#if V8_TARGET_ARCH_PPC64
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_SUB_WITH_OVERFLOW();
+      } else {
+#endif
+        if (HasRegisterInput(instr, 1)) {
+          __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                 LeaveOE, i.OutputRCBit());
+        } else {
+          __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+          DCHECK_EQ(LeaveRC, i.OutputRCBit());
+        }
+#if V8_TARGET_ARCH_PPC64
+      }
+#endif
+      break;
+    case kPPC_SubWithOverflow32:
+      ASSEMBLE_SUB_WITH_OVERFLOW32();
+      break;
+    case kPPC_SubDouble:
+      ASSEMBLE_FLOAT_BINOP_RC(fsub);
+      break;
+    case kPPC_Mul32:
+      __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               LeaveOE, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Mul64:
+      __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               LeaveOE, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_MulHigh32:
+      __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.OutputRCBit());
+      break;
+    case kPPC_MulHighU32:
+      __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                i.OutputRCBit());
+      break;
+    case kPPC_MulDouble:
+      ASSEMBLE_FLOAT_BINOP_RC(fmul);
+      break;
+    case kPPC_Div32:
+      __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Div64:
+      __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_DivU32:
+      __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_DivU64:
+      __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_DivDouble:
+      ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+      break;
+    case kPPC_Mod32:
+      ASSEMBLE_MODULO(divw, mullw);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Mod64:
+      ASSEMBLE_MODULO(divd, mulld);
+      break;
+#endif
+    case kPPC_ModU32:
+      ASSEMBLE_MODULO(divwu, mullw);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_ModU64:
+      ASSEMBLE_MODULO(divdu, mulld);
+      break;
+#endif
+    case kPPC_ModDouble:
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      ASSEMBLE_FLOAT_MODULO();
+      break;
+    case kPPC_Neg:
+      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
+      break;
+    case kPPC_MaxDouble:
+      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
+      break;
+    case kPPC_MinDouble:
+      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
+      break;
+    case kPPC_AbsDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(fabs);
+      break;
+    case kPPC_SqrtDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+      break;
+    case kPPC_FloorDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(frim);
+      break;
+    case kPPC_CeilDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(frip);
+      break;
+    case kPPC_TruncateDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(friz);
+      break;
+    case kPPC_RoundDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(frin);
+      break;
+    case kPPC_NegDouble:
+      ASSEMBLE_FLOAT_UNOP_RC(fneg);
+      break;
+    case kPPC_Cntlz32:
+      __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Cntlz64:
+      __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_Popcnt32:
+      __ popcntw(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Popcnt64:
+      __ popcntd(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_Cmp32:
+      ASSEMBLE_COMPARE(cmpw, cmplw);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Cmp64:
+      ASSEMBLE_COMPARE(cmp, cmpl);
+      break;
+#endif
+    case kPPC_CmpDouble:
+      ASSEMBLE_FLOAT_COMPARE(fcmpu);
+      break;
+    case kPPC_Tst32:
+      if (HasRegisterInput(instr, 1)) {
+        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+      } else {
+        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+      }
+#if V8_TARGET_ARCH_PPC64
+      __ extsw(r0, r0, i.OutputRCBit());
+#endif
+      DCHECK_EQ(SetRC, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Tst64:
+      if (HasRegisterInput(instr, 1)) {
+        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+      } else {
+        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+      }
+      DCHECK_EQ(SetRC, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_Push:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+      } else {
+        __ Push(i.InputRegister(0));
+        frame_access_state()->IncreaseSPDelta(1);
+      }
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_PushFrame: {
+      int num_slots = i.InputInt32(1);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ stfdu(i.InputDoubleRegister(0),
+                 MemOperand(sp, -num_slots * kPointerSize));
+      } else {
+        __ StorePU(i.InputRegister(0),
+                   MemOperand(sp, -num_slots * kPointerSize));
+      }
+      break;
+    }
+    case kPPC_StoreToStackSlot: {
+      int slot = i.InputInt32(1);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
+      } else {
+        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+      }
+      break;
+    }
+    case kPPC_ExtendSignWord8:
+      __ extsb(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_ExtendSignWord16:
+      __ extsh(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_ExtendSignWord32:
+      __ extsw(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Uint32ToUint64:
+      // Zero extend
+      __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Int64ToInt32:
+      __ extsw(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Int64ToFloat32:
+      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Int64ToDouble:
+      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Uint64ToFloat32:
+      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+                                     i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Uint64ToDouble:
+      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+                                      i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+#endif
+    case kPPC_Int32ToDouble:
+      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_Uint32ToDouble:
+      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+                                    i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_DoubleToInt32:
+    case kPPC_DoubleToUint32:
+    case kPPC_DoubleToInt64: {
+#if V8_TARGET_ARCH_PPC64
+      bool check_conversion =
+          (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
+      if (check_conversion) {
+        __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
+      }
+#endif
+      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_PPC64
+                              kScratchReg,
+#endif
+                              i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_PPC64
+      if (check_conversion) {
+        // Set 2nd output to zero if conversion fails.
+        CRegister cr = cr7;
+        int crbit = v8::internal::Assembler::encode_crbit(
+            cr, static_cast<CRBit>(VXCVI % CRWIDTH));
+        __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
+        if (CpuFeatures::IsSupported(ISELECT)) {
+          __ li(i.OutputRegister(1), Operand(1));
+          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+        } else {
+          __ li(i.OutputRegister(1), Operand::Zero());
+          __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+          __ li(i.OutputRegister(1), Operand(1));
+        }
+      }
+#endif
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_DoubleToUint64: {
+      bool check_conversion = (i.OutputCount() > 1);
+      if (check_conversion) {
+        __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
+      }
+      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+                                      i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        // Set 2nd output to zero if conversion fails.
+        CRegister cr = cr7;
+        int crbit = v8::internal::Assembler::encode_crbit(
+            cr, static_cast<CRBit>(VXCVI % CRWIDTH));
+        __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
+        if (CpuFeatures::IsSupported(ISELECT)) {
+          __ li(i.OutputRegister(1), Operand(1));
+          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+        } else {
+          __ li(i.OutputRegister(1), Operand::Zero());
+          __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+          __ li(i.OutputRegister(1), Operand(1));
+        }
+      }
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    }
+#endif
+    case kPPC_DoubleToFloat32:
+      ASSEMBLE_FLOAT_UNOP_RC(frsp);
+      break;
+    case kPPC_Float32ToDouble:
+      // Nothing to do here beyond a register move: float32 values are
+      // already held in double format in the FPRs.
+      __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_DoubleExtractLowWord32:
+      __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_DoubleExtractHighWord32:
+      __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_DoubleInsertLowWord32:
+      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_DoubleInsertHighWord32:
+      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_DoubleConstruct:
+#if V8_TARGET_ARCH_PPC64
+      __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
+                                    i.InputRegister(0), i.InputRegister(1), r0);
+#else
+      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
+                          i.InputRegister(1));
+#endif
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_BitcastFloat32ToInt32:
+      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kPPC_BitcastInt32ToFloat32:
+      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_BitcastDoubleToInt64:
+      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kPPC_BitcastInt64ToDouble:
+      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+#endif
+    case kPPC_LoadWordU8:
+      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+      break;
+    case kPPC_LoadWordS8:
+      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+      __ extsb(i.OutputRegister(), i.OutputRegister());
+      break;
+    case kPPC_LoadWordU16:
+      ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+      break;
+    case kPPC_LoadWordS16:
+      ASSEMBLE_LOAD_INTEGER(lha, lhax);
+      break;
+    case kPPC_LoadWordS32:
+      ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_LoadWord64:
+      ASSEMBLE_LOAD_INTEGER(ld, ldx);
+      break;
+#endif
+    case kPPC_LoadFloat32:
+      ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
+      break;
+    case kPPC_LoadDouble:
+      ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
+      break;
+    case kPPC_StoreWord8:
+      ASSEMBLE_STORE_INTEGER(stb, stbx);
+      break;
+    case kPPC_StoreWord16:
+      ASSEMBLE_STORE_INTEGER(sth, sthx);
+      break;
+    case kPPC_StoreWord32:
+      ASSEMBLE_STORE_INTEGER(stw, stwx);
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_StoreWord64:
+      ASSEMBLE_STORE_INTEGER(std, stdx);
+      break;
+#endif
+    case kPPC_StoreFloat32:
+      ASSEMBLE_STORE_FLOAT32();
+      break;
+    case kPPC_StoreDouble:
+      ASSEMBLE_STORE_DOUBLE();
+      break;
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+      __ extsb(i.OutputRegister(), i.OutputRegister());
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+      break;
+    case kCheckedLoadWord64:
+#if V8_TARGET_ARCH_PPC64
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
+      break;
+    case kCheckedStoreWord64:
+#if V8_TARGET_ARCH_PPC64
+      ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT32();
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_DOUBLE();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}  // NOLINT(readability/fn_size)
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  PPCOperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  ArchOpcode op = instr->arch_opcode();
+  FlagsCondition condition = branch->condition;
+  CRegister cr = cr0;
+
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kPPC_CmpDouble) {
+    // check for unordered if necessary
+    if (cond == le) {
+      __ bunordered(flabel, cr);
+      // Unnecessary for eq/lt since only FU bit will be set.
+    } else if (cond == gt) {
+      __ bunordered(tlabel, cr);
+      // Unnecessary for ne/ge since only FU bit will be set.
+    }
+  }
+  __ b(cond, tlabel, cr);
+  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
+}
+
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  PPCOperandConverter i(this, instr);
+  Label done;
+  ArchOpcode op = instr->arch_opcode();
+  CRegister cr = cr0;
+  int reg_value = -1;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  DCHECK_NE(0u, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kPPC_CmpDouble) {
+    // check for unordered if necessary
+    if (cond == le) {
+      reg_value = 0;
+      __ li(reg, Operand::Zero());
+      __ bunordered(&done, cr);
+    } else if (cond == gt) {
+      reg_value = 1;
+      __ li(reg, Operand(1));
+      __ bunordered(&done, cr);
+    }
+    // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
+  }
+
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    switch (cond) {
+      case eq:
+      case lt:
+      case gt:
+        if (reg_value != 1) __ li(reg, Operand(1));
+        __ li(kScratchReg, Operand::Zero());
+        __ isel(cond, reg, reg, kScratchReg, cr);
+        break;
+      case ne:
+      case ge:
+      case le:
+        if (reg_value != 1) __ li(reg, Operand(1));
+        // r0 implies logical zero in this form
+        __ isel(NegateCondition(cond), reg, r0, reg, cr);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    if (reg_value != 0) __ li(reg, Operand::Zero());
+    __ b(NegateCondition(cond), &done, cr);
+    __ li(reg, Operand(1));
+  }
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  PPCOperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
+    __ beq(GetLabel(i.InputRpo(index + 1)));
+  }
+  AssembleArchJump(i.InputRpo(1));
+}
+
+
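+// Emits a bounds check against the case count (branching to the default
+// block when out of range), then loads the target address out of an
+// embedded jump table indexed by the input and jumps to it.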
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  PPCOperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+  Label** cases = zone()->NewArray<Label*>(case_count);
+  for (int32_t index = 0; index < case_count; ++index) {
+    cases[index] = GetLabel(i.InputRpo(index + 2));
+  }
+  Label* const table = AddJumpTable(cases, case_count);
+  __ Cmpli(input, Operand(case_count), r0);
+  __ bge(GetLabel(i.InputRpo(1)));
+  __ mov_label_addr(kScratchReg, table);
+  __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
+  __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
+  __ Jump(kScratchReg);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, bailout_type);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->IsCFunctionCall()) {
+    __ function_descriptor();
+    __ mflr(r0);
+    if (FLAG_enable_embedded_constant_pool) {
+      __ Push(r0, fp, kConstantPoolRegister);
+      // Adjust FP to point to saved FP.
+      __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+    } else {
+      __ Push(r0, fp);
+      __ mr(fp, sp);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+  } else if (frame()->needs_frame()) {
+    if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
+      // TODO(mbrandy): Restrict only to the wasm wrapper case.
+      __ StubPrologue();
+    } else {
+      __ StubPrologue(ip);
+    }
+  } else {
+    frame()->SetElidedFrameSizeInSlots(0);
+  }
+  frame_access_state()->SetFrameAccessToDefault();
+
+  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  if (info()->is_osr()) {
+    // TurboFan OSR-compiled functions cannot be entered directly.
+    __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+    // Unoptimized code jumps directly to this entrypoint while the unoptimized
+    // frame is still on the stack. Optimized code uses OSR values directly from
+    // the unoptimized frame. Thus, all that needs to be done is to allocate the
+    // remaining stack slots.
+    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+    osr_pc_offset_ = __ pc_offset();
+    // TODO(titzer): cannot address target function == local #-1
+    __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+  }
+
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+  if (double_saves != 0) {
+    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+  }
+  if (stack_shrink_slots > 0) {
+    __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
+  }
+
+  // Save callee-saved Double registers.
+  if (double_saves != 0) {
+    __ MultiPushDoubles(double_saves);
+    DCHECK(kNumCalleeSavedDoubles ==
+           base::bits::CountPopulation32(double_saves));
+    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+                                              (kDoubleSize / kPointerSize));
+  }
+
+  // Save callee-saved registers.
+  const RegList saves =
+      FLAG_enable_embedded_constant_pool
+          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+          : descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    __ MultiPush(saves);
+    // Register save area does not include the fp or constant pool pointer.
+    const int num_saves =
+        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+    DCHECK(num_saves == base::bits::CountPopulation32(saves));
+    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+  // Restore registers.
+  const RegList saves =
+      FLAG_enable_embedded_constant_pool
+          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+          : descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    __ MultiPop(saves);
+  }
+
+  // Restore double registers.
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+  if (double_saves != 0) {
+    __ MultiPopDoubles(double_saves);
+  }
+
+  if (descriptor->IsCFunctionCall()) {
+    __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+  } else if (frame()->needs_frame()) {
+    // Canonicalize JSFunction return sites for now.
+    if (return_label_.is_bound()) {
+      __ b(&return_label_);
+      return;
+    } else {
+      __ bind(&return_label_);
+      __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+    }
+  } else {
+    __ Drop(pop_count);
+  }
+  __ Ret();
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  PPCOperandConverter g(this, nullptr);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Move(g.ToRegister(destination), src);
+    } else {
+      __ StoreP(src, g.ToMemOperand(destination), r0);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ LoadP(g.ToRegister(destination), src, r0);
+    } else {
+      Register temp = kScratchReg;
+      __ LoadP(temp, src, r0);
+      __ StoreP(temp, g.ToMemOperand(destination), r0);
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          __ mov(dst, Operand(src.ToInt64()));
+          break;
+        case Constant::kFloat32:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject: {
+          Handle<HeapObject> src_object = src.ToHeapObject();
+          Heap::RootListIndex index;
+          int offset;
+          if (IsMaterializableFromFrame(src_object, &offset)) {
+            __ LoadP(dst, MemOperand(fp, offset));
+          } else if (IsMaterializableFromRoot(src_object, &index)) {
+            __ LoadRoot(dst, index);
+          } else {
+            __ Move(dst, src_object);
+          }
+          break;
+        }
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
+          break;
+      }
+      if (destination->IsStackSlot()) {
+        __ StoreP(dst, g.ToMemOperand(destination), r0);
+      }
+    } else {
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+                                                        : src.ToFloat64();
+      __ LoadDoubleLiteral(dst, value, kScratchReg);
+      if (destination->IsDoubleStackSlot()) {
+        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DoubleRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ StoreDouble(src, g.ToMemOperand(destination), r0);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+    } else {
+      DoubleRegister temp = kScratchDoubleReg;
+      __ LoadDouble(temp, src, r0);
+      __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  PPCOperandConverter g(this, nullptr);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mr(temp, src);
+      __ mr(src, dst);
+      __ mr(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mr(temp, src);
+      __ LoadP(src, dst);
+      __ StoreP(temp, dst);
+    }
+#if V8_TARGET_ARCH_PPC64
+  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+#endif
+    Register temp_0 = kScratchReg;
+    Register temp_1 = r0;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ LoadP(temp_0, src);
+    __ LoadP(temp_1, dst);
+    __ StoreP(temp_0, dst);
+    __ StoreP(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister temp = kScratchDoubleReg;
+    DoubleRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DoubleRegister dst = g.ToDoubleRegister(destination);
+      __ fmr(temp, src);
+      __ fmr(src, dst);
+      __ fmr(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ fmr(temp, src);
+      __ lfd(src, dst);
+      __ stfd(temp, dst);
+    }
+#if !V8_TARGET_ARCH_PPC64
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    DoubleRegister temp_0 = kScratchDoubleReg;
+    DoubleRegister temp_1 = d0;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ lfd(temp_0, src);
+    __ lfd(temp_1, dst);
+    __ stfd(temp_0, dst);
+    __ stfd(temp_1, src);
+#endif
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+  for (size_t index = 0; index < target_count; ++index) {
+    __ emit_label_addr(targets[index]);
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // We do not insert nops for inlined Smi code.
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+    return;
+  }
+
+  int space_needed = Deoptimizer::patch_size();
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    // Block trampoline pool emission for the duration of the padding.
+    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+        masm());
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= v8::internal::Assembler::kInstrSize;
+    }
+  }
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
new file mode 100644
index 0000000..a3bf80e
--- /dev/null
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -0,0 +1,139 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+#define V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// PPC-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(PPC_And)                       \
+  V(PPC_AndComplement)             \
+  V(PPC_Or)                        \
+  V(PPC_OrComplement)              \
+  V(PPC_Xor)                       \
+  V(PPC_ShiftLeft32)               \
+  V(PPC_ShiftLeft64)               \
+  V(PPC_ShiftRight32)              \
+  V(PPC_ShiftRight64)              \
+  V(PPC_ShiftRightAlg32)           \
+  V(PPC_ShiftRightAlg64)           \
+  V(PPC_RotRight32)                \
+  V(PPC_RotRight64)                \
+  V(PPC_Not)                       \
+  V(PPC_RotLeftAndMask32)          \
+  V(PPC_RotLeftAndClear64)         \
+  V(PPC_RotLeftAndClearLeft64)     \
+  V(PPC_RotLeftAndClearRight64)    \
+  V(PPC_Add)                       \
+  V(PPC_AddWithOverflow32)         \
+  V(PPC_AddDouble)                 \
+  V(PPC_Sub)                       \
+  V(PPC_SubWithOverflow32)         \
+  V(PPC_SubDouble)                 \
+  V(PPC_Mul32)                     \
+  V(PPC_Mul64)                     \
+  V(PPC_MulHigh32)                 \
+  V(PPC_MulHighU32)                \
+  V(PPC_MulDouble)                 \
+  V(PPC_Div32)                     \
+  V(PPC_Div64)                     \
+  V(PPC_DivU32)                    \
+  V(PPC_DivU64)                    \
+  V(PPC_DivDouble)                 \
+  V(PPC_Mod32)                     \
+  V(PPC_Mod64)                     \
+  V(PPC_ModU32)                    \
+  V(PPC_ModU64)                    \
+  V(PPC_ModDouble)                 \
+  V(PPC_Neg)                       \
+  V(PPC_NegDouble)                 \
+  V(PPC_SqrtDouble)                \
+  V(PPC_FloorDouble)               \
+  V(PPC_CeilDouble)                \
+  V(PPC_TruncateDouble)            \
+  V(PPC_RoundDouble)               \
+  V(PPC_MaxDouble)                 \
+  V(PPC_MinDouble)                 \
+  V(PPC_AbsDouble)                 \
+  V(PPC_Cntlz32)                   \
+  V(PPC_Cntlz64)                   \
+  V(PPC_Popcnt32)                  \
+  V(PPC_Popcnt64)                  \
+  V(PPC_Cmp32)                     \
+  V(PPC_Cmp64)                     \
+  V(PPC_CmpDouble)                 \
+  V(PPC_Tst32)                     \
+  V(PPC_Tst64)                     \
+  V(PPC_Push)                      \
+  V(PPC_PushFrame)                 \
+  V(PPC_StoreToStackSlot)          \
+  V(PPC_ExtendSignWord8)           \
+  V(PPC_ExtendSignWord16)          \
+  V(PPC_ExtendSignWord32)          \
+  V(PPC_Uint32ToUint64)            \
+  V(PPC_Int64ToInt32)              \
+  V(PPC_Int64ToFloat32)            \
+  V(PPC_Int64ToDouble)             \
+  V(PPC_Uint64ToFloat32)           \
+  V(PPC_Uint64ToDouble)            \
+  V(PPC_Int32ToDouble)             \
+  V(PPC_Uint32ToDouble)            \
+  V(PPC_Float32ToDouble)           \
+  V(PPC_DoubleToInt32)             \
+  V(PPC_DoubleToUint32)            \
+  V(PPC_DoubleToInt64)             \
+  V(PPC_DoubleToUint64)            \
+  V(PPC_DoubleToFloat32)           \
+  V(PPC_DoubleExtractLowWord32)    \
+  V(PPC_DoubleExtractHighWord32)   \
+  V(PPC_DoubleInsertLowWord32)     \
+  V(PPC_DoubleInsertHighWord32)    \
+  V(PPC_DoubleConstruct)           \
+  V(PPC_BitcastInt32ToFloat32)     \
+  V(PPC_BitcastFloat32ToInt32)     \
+  V(PPC_BitcastInt64ToDouble)      \
+  V(PPC_BitcastDoubleToInt64)      \
+  V(PPC_LoadWordS8)                \
+  V(PPC_LoadWordU8)                \
+  V(PPC_LoadWordS16)               \
+  V(PPC_LoadWordU16)               \
+  V(PPC_LoadWordS32)               \
+  V(PPC_LoadWord64)                \
+  V(PPC_LoadFloat32)               \
+  V(PPC_LoadDouble)                \
+  V(PPC_StoreWord8)                \
+  V(PPC_StoreWord16)               \
+  V(PPC_StoreWord32)               \
+  V(PPC_StoreWord64)               \
+  V(PPC_StoreFloat32)              \
+  V(PPC_StoreDouble)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
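+//
+// For example, a load with mode MRI emits the D/DS-form instruction
+// (e.g. lbz) while MRR emits the X-form indexed variant (e.g. lbzx),
+// matching the instruction pairs passed to the ASSEMBLE_* macros in
+// code-generator-ppc.cc.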
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
new file mode 100644
index 0000000..fc90cdd
--- /dev/null
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -0,0 +1,143 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+    const Instruction* instr) const {
+  switch (instr->arch_opcode()) {
+    case kPPC_And:
+    case kPPC_AndComplement:
+    case kPPC_Or:
+    case kPPC_OrComplement:
+    case kPPC_Xor:
+    case kPPC_ShiftLeft32:
+    case kPPC_ShiftLeft64:
+    case kPPC_ShiftRight32:
+    case kPPC_ShiftRight64:
+    case kPPC_ShiftRightAlg32:
+    case kPPC_ShiftRightAlg64:
+    case kPPC_RotRight32:
+    case kPPC_RotRight64:
+    case kPPC_Not:
+    case kPPC_RotLeftAndMask32:
+    case kPPC_RotLeftAndClear64:
+    case kPPC_RotLeftAndClearLeft64:
+    case kPPC_RotLeftAndClearRight64:
+    case kPPC_Add:
+    case kPPC_AddWithOverflow32:
+    case kPPC_AddDouble:
+    case kPPC_Sub:
+    case kPPC_SubWithOverflow32:
+    case kPPC_SubDouble:
+    case kPPC_Mul32:
+    case kPPC_Mul64:
+    case kPPC_MulHigh32:
+    case kPPC_MulHighU32:
+    case kPPC_MulDouble:
+    case kPPC_Div32:
+    case kPPC_Div64:
+    case kPPC_DivU32:
+    case kPPC_DivU64:
+    case kPPC_DivDouble:
+    case kPPC_Mod32:
+    case kPPC_Mod64:
+    case kPPC_ModU32:
+    case kPPC_ModU64:
+    case kPPC_ModDouble:
+    case kPPC_Neg:
+    case kPPC_NegDouble:
+    case kPPC_SqrtDouble:
+    case kPPC_FloorDouble:
+    case kPPC_CeilDouble:
+    case kPPC_TruncateDouble:
+    case kPPC_RoundDouble:
+    case kPPC_MaxDouble:
+    case kPPC_MinDouble:
+    case kPPC_AbsDouble:
+    case kPPC_Cntlz32:
+    case kPPC_Cntlz64:
+    case kPPC_Popcnt32:
+    case kPPC_Popcnt64:
+    case kPPC_Cmp32:
+    case kPPC_Cmp64:
+    case kPPC_CmpDouble:
+    case kPPC_Tst32:
+    case kPPC_Tst64:
+    case kPPC_ExtendSignWord8:
+    case kPPC_ExtendSignWord16:
+    case kPPC_ExtendSignWord32:
+    case kPPC_Uint32ToUint64:
+    case kPPC_Int64ToInt32:
+    case kPPC_Int64ToFloat32:
+    case kPPC_Int64ToDouble:
+    case kPPC_Uint64ToFloat32:
+    case kPPC_Uint64ToDouble:
+    case kPPC_Int32ToDouble:
+    case kPPC_Uint32ToDouble:
+    case kPPC_Float32ToDouble:
+    case kPPC_DoubleToInt32:
+    case kPPC_DoubleToUint32:
+    case kPPC_DoubleToInt64:
+    case kPPC_DoubleToUint64:
+    case kPPC_DoubleToFloat32:
+    case kPPC_DoubleExtractLowWord32:
+    case kPPC_DoubleExtractHighWord32:
+    case kPPC_DoubleInsertLowWord32:
+    case kPPC_DoubleInsertHighWord32:
+    case kPPC_DoubleConstruct:
+    case kPPC_BitcastInt32ToFloat32:
+    case kPPC_BitcastFloat32ToInt32:
+    case kPPC_BitcastInt64ToDouble:
+    case kPPC_BitcastDoubleToInt64:
+      return kNoOpcodeFlags;
+
+    case kPPC_LoadWordS8:
+    case kPPC_LoadWordU8:
+    case kPPC_LoadWordS16:
+    case kPPC_LoadWordU16:
+    case kPPC_LoadWordS32:
+    case kPPC_LoadWord64:
+    case kPPC_LoadFloat32:
+    case kPPC_LoadDouble:
+      return kIsLoadOperation;
+
+    case kPPC_StoreWord8:
+    case kPPC_StoreWord16:
+    case kPPC_StoreWord32:
+    case kPPC_StoreWord64:
+    case kPPC_StoreFloat32:
+    case kPPC_StoreDouble:
+    case kPPC_Push:
+    case kPPC_PushFrame:
+    case kPPC_StoreToStackSlot:
+      return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+    COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+      // Already covered in architecture independent code.
+      UNREACHABLE();
+  }
+
+  UNREACHABLE();
+  return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+  // TODO(all): Add instruction cost modeling.
+  return 1;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
new file mode 100644
index 0000000..f6ebbdf
--- /dev/null
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -0,0 +1,1772 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/ppc/frames-ppc.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+  kInt16Imm,
+  kInt16Imm_Unsigned,
+  kInt16Imm_Negate,
+  kInt16Imm_4ByteAligned,
+  kShift32Imm,
+  kShift64Imm,
+  kNoImmediate
+};
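+
+// Note: kInt16Imm_4ByteAligned matches the DS instruction format used
+// by ld, std and lwa, whose 16-bit displacement must be a multiple of
+// 4 (hence its use for 64-bit loads and stores in VisitLoad below).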
+
+
+// Adds PPC-specific methods for generating operands.
+class PPCOperandGenerator final : public OperandGenerator {
+ public:
+  explicit PPCOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+    if (CanBeImmediate(node, mode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    return CanBeImmediate(value, mode);
+  }
+
+  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+    switch (mode) {
+      case kInt16Imm:
+        return is_int16(value);
+      case kInt16Imm_Unsigned:
+        return is_uint16(value);
+      case kInt16Imm_Negate:
+        return is_int16(-value);
+      case kInt16Imm_4ByteAligned:
+        return is_int16(value) && !(value & 3);
+      case kShift32Imm:
+        return 0 <= value && value < 32;
+      case kShift64Imm:
+        return 0 <= value && value < 64;
+      case kNoImmediate:
+        return false;
+    }
+    return false;
+  }
+};
+
+
+namespace {
+
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  PPCOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  PPCOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+              ImmediateMode operand_mode) {
+  PPCOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  PPCOperandGenerator g(selector);
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+  outputs[output_count++] = g.DefineAsRegister(node);
+
+  Node* success_output = NodeProperties::FindProjection(node, 1);
+  if (success_output) {
+    outputs[output_count++] = g.DefineAsRegister(success_output);
+  }
+
+  selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, ImmediateMode operand_mode,
+                FlagsContinuation* cont) {
+  PPCOperandGenerator g(selector);
+  Matcher m(node);
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0u, input_count);
+  DCHECK_NE(0u, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+                 inputs);
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+                ImmediateMode operand_mode) {
+  FlagsContinuation cont;
+  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+}  // namespace
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  PPCOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* offset = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  ImmediateMode mode = kInt16Imm;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kPPC_LoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kPPC_LoadDouble;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
+      break;
+#if !V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kTagged:  // Fall through.
+#endif
+    case MachineRepresentation::kWord32:
+      opcode = kPPC_LoadWordS32;
+#if V8_TARGET_ARCH_PPC64
+      // TODO(mbrandy): this applies to signed loads only (lwa)
+      mode = kInt16Imm_4ByteAligned;
+#endif
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kPPC_LoadWord64;
+      mode = kInt16Imm_4ByteAligned;
+      break;
+#else
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(offset, mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+  } else if (g.CanBeImmediate(base, mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  PPCOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* offset = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+  MachineRepresentation rep = store_rep.representation();
+
+  // TODO(ppc): I guess this could be done in a better way.
+  if (write_barrier_kind != kNoWriteBarrier) {
+    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    InstructionOperand inputs[3];
+    size_t input_count = 0;
+    inputs[input_count++] = g.UseUniqueRegister(base);
+    inputs[input_count++] = g.UseUniqueRegister(offset);
+    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+                                ? g.UseRegister(value)
+                                : g.UseUniqueRegister(value);
+    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+    switch (write_barrier_kind) {
+      case kNoWriteBarrier:
+        UNREACHABLE();
+        break;
+      case kMapWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsMap;
+        break;
+      case kPointerWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsPointer;
+        break;
+      case kFullWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsAny;
+        break;
+    }
+    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+    size_t const temp_count = arraysize(temps);
+    InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= MiscField::encode(static_cast<int>(record_write_mode));
+    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+  } else {
+    ArchOpcode opcode = kArchNop;
+    ImmediateMode mode = kInt16Imm;
+    switch (rep) {
+      case MachineRepresentation::kFloat32:
+        opcode = kPPC_StoreFloat32;
+        break;
+      case MachineRepresentation::kFloat64:
+        opcode = kPPC_StoreDouble;
+        break;
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = kPPC_StoreWord8;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = kPPC_StoreWord16;
+        break;
+#if !V8_TARGET_ARCH_PPC64
+      case MachineRepresentation::kTagged:  // Fall through.
+#endif
+      case MachineRepresentation::kWord32:
+        opcode = kPPC_StoreWord32;
+        break;
+#if V8_TARGET_ARCH_PPC64
+      case MachineRepresentation::kTagged:  // Fall through.
+      case MachineRepresentation::kWord64:
+        opcode = kPPC_StoreWord64;
+        mode = kInt16Imm_4ByteAligned;
+        break;
+#else
+      case MachineRepresentation::kWord64:  // Fall through.
+#endif
+      case MachineRepresentation::kNone:
+        UNREACHABLE();
+        return;
+    }
+    if (g.CanBeImmediate(offset, mode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+    } else if (g.CanBeImmediate(base, mode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+    } else {
+      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+    }
+  }
+}
+
+
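+// Checked accesses bounds-check the offset against `length` before the
+// memory operation (see the ASSEMBLE_CHECKED_* macros in
+// code-generator-ppc.cc); an out-of-bounds load yields a default value
+// instead of touching memory, and an out-of-bounds store is skipped.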
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
+  PPCOperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kWord64:
+      opcode = kCheckedLoadWord64;
+      break;
+#endif
+    case MachineRepresentation::kFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+  AddressingMode addressingMode = kMode_MRR;
+  Emit(opcode | AddressingModeField::encode(addressingMode),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+       g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
+  PPCOperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+#if V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kWord64:
+      opcode = kCheckedStoreWord64;
+      break;
+#endif
+    case MachineRepresentation::kFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+  AddressingMode addressingMode = kMode_MRR;
+  Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+       g.UseRegister(base), g.UseRegister(offset),
+       g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+                         ArchOpcode opcode, bool left_can_cover,
+                         bool right_can_cover, ImmediateMode imm_mode) {
+  PPCOperandGenerator g(selector);
+
+  // Map instruction to equivalent operation with inverted right input.
+  ArchOpcode inv_opcode = opcode;
+  switch (opcode) {
+    case kPPC_And:
+      inv_opcode = kPPC_AndComplement;
+      break;
+    case kPPC_Or:
+      inv_opcode = kPPC_OrComplement;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+    Matcher mleft(m->left().node());
+    if (mleft.right().Is(-1)) {
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->right().node()),
+                     g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+
+  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+      right_can_cover) {
+    Matcher mright(m->right().node());
+    if (mright.right().Is(-1)) {
+      // TODO(all): support shifted operand on right.
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->left().node()),
+                     g.UseRegister(mright.left().node()));
+      return;
+    }
+  }
+
+  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
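+// For instance, Word32And(x, Word32Xor(y, -1)) is selected as a single
+// kPPC_AndComplement (andc: x & ~y), provided the xor has no other
+// uses.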
+
+
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+  int mask_width = base::bits::CountPopulation32(value);
+  int mask_msb = base::bits::CountLeadingZeros32(value);
+  int mask_lsb = base::bits::CountTrailingZeros32(value);
+  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+    return false;
+  *mb = mask_lsb + mask_width - 1;
+  *me = mask_lsb;
+  return true;
+}
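+// For example, 0x00ff0000 (popcount 8, 16 trailing zeros) yields
+// mb = 23 and me = 16. The mb/me indices count from the least
+// significant bit; the code generator is expected to map them onto the
+// big-endian mask begin/end fields that rlwinm takes.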
+
+
+#if V8_TARGET_ARCH_PPC64
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+  int mask_width = base::bits::CountPopulation64(value);
+  int mask_msb = base::bits::CountLeadingZeros64(value);
+  int mask_lsb = base::bits::CountTrailingZeros64(value);
+  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+    return false;
+  *mb = mask_lsb + mask_width - 1;
+  *me = mask_lsb;
+  return true;
+}
+#endif
+
+
+// TODO(mbrandy): Absorb rotate-right into rlwinm?
+void InstructionSelector::VisitWord32And(Node* node) {
+  PPCOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  int mb = 0;
+  int me = 0;
+  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+    int sh = 0;
+    Node* left = m.left().node();
+    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+        CanCover(node, left)) {
+      // Try to absorb left/right shift into rlwinm
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 31)) {
+        left = mleft.left().node();
+        sh = mleft.right().Value();
+        if (m.left().IsWord32Shr()) {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (mb > 31 - sh) mb = 31 - sh;
+          sh = (32 - sh) & 0x1f;
+        } else {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (me < sh) me = sh;
+        }
+      }
+    }
+    if (mb >= me) {
+      Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
+           g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
+      return;
+    }
+  }
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb rotate-right into rldic?
+void InstructionSelector::VisitWord64And(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  int mb = 0;
+  int me = 0;
+  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+    int sh = 0;
+    Node* left = m.left().node();
+    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+        CanCover(node, left)) {
+      // Try to absorb left/right shift into rldic
+      Int64BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 63)) {
+        left = mleft.left().node();
+        sh = mleft.right().Value();
+        if (m.left().IsWord64Shr()) {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (mb > 63 - sh) mb = 63 - sh;
+          sh = (64 - sh) & 0x3f;
+        } else {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (me < sh) me = sh;
+        }
+      }
+    }
+    if (mb >= me) {
+      bool match = false;
+      ArchOpcode opcode;
+      int mask;
+      if (me == 0) {
+        match = true;
+        opcode = kPPC_RotLeftAndClearLeft64;
+        mask = mb;
+      } else if (mb == 63) {
+        match = true;
+        opcode = kPPC_RotLeftAndClearRight64;
+        mask = me;
+      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+        match = true;
+        opcode = kPPC_RotLeftAndClear64;
+        mask = mb;
+      }
+      if (match) {
+        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+             g.TempImmediate(sh), g.TempImmediate(mask));
+        return;
+      }
+    }
+  }
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  Int32BinopMatcher m(node);
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Or(Node* node) {
+  Int64BinopMatcher m(node);
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  PPCOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
+  }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
+  }
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  PPCOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    // Try to absorb logical-and into rlwinm
+    Int32BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (me < sh) me = sh;
+      if (mb >= me) {
+        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+             g.TempImmediate(mb), g.TempImmediate(me));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
+}
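+// For example, (x & 0xff) << 8 is selected above as a single
+// kPPC_RotLeftAndMask32 with sh = 8, mb = 15, me = 8 (rotate left 8,
+// keep bits 8..15), which computes (x << 8) & 0xff00.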
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // TODO(mbrandy): eliminate left sign extension if right >= 32
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    // Try to absorb logical-and into rldic
+    Int64BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (me < sh) me = sh;
+      if (mb >= me) {
+        bool match = false;
+        ArchOpcode opcode;
+        int mask;
+        if (me == 0) {
+          match = true;
+          opcode = kPPC_RotLeftAndClearLeft64;
+          mask = mb;
+        } else if (mb == 63) {
+          match = true;
+          opcode = kPPC_RotLeftAndClearRight64;
+          mask = me;
+        } else if (sh && me <= sh) {
+          match = true;
+          opcode = kPPC_RotLeftAndClear64;
+          mask = mb;
+        }
+        if (match) {
+          Emit(opcode, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+               g.TempImmediate(mask));
+          return;
+        }
+      }
+    }
+  }
+  VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  PPCOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    // Try to absorb logical-and into rlwinm
+    Int32BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (mb > 31 - sh) mb = 31 - sh;
+      sh = (32 - sh) & 0x1f;
+      if (mb >= me) {
+        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+             g.TempImmediate(mb), g.TempImmediate(me));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    // Try to absorb logical-and into rldic
+    Int64BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (mb > 63 - sh) mb = 63 - sh;
+      sh = (64 - sh) & 0x3f;
+      if (mb >= me) {
+        bool match = false;
+        ArchOpcode opcode;
+        int mask;
+        if (me == 0) {
+          match = true;
+          opcode = kPPC_RotLeftAndClearLeft64;
+          mask = mb;
+        } else if (mb == 63) {
+          match = true;
+          opcode = kPPC_RotLeftAndClearRight64;
+          mask = me;
+        }
+        if (match) {
+          Emit(opcode, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+               g.TempImmediate(mask));
+          return;
+        }
+      }
+    }
+  }
+  VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  PPCOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
+  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+  VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
+}
+#endif
+
+
+// TODO(mbrandy): Absorb logical-and into rlwinm?
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb logical-and into rldic?
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Clz(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_Cntlz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  PPCOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
+  }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
+  }
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitRRR(this, kPPC_Mul32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitRRR(this, kPPC_Mul64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitRRR(this, kPPC_Div32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitRRR(this, kPPC_Div64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  VisitRRR(this, kPPC_DivU32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Div(Node* node) {
+  VisitRRR(this, kPPC_DivU64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitRRR(this, kPPC_Mod32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitRRR(this, kPPC_Mod64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  VisitRRR(this, kPPC_ModU32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Mod(Node* node) {
+  VisitRRR(this, kPPC_ModU64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  VisitRR(this, kPPC_Float32ToDouble, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  VisitRR(this, kPPC_Int32ToDouble, node);
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  VisitRR(this, kPPC_Uint32ToDouble, node);
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  VisitRR(this, kPPC_DoubleToInt32, node);
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  VisitRR(this, kPPC_DoubleToUint32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kPPC_ExtendSignWord32, node);
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kPPC_Uint32ToUint64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  VisitRR(this, kPPC_DoubleToFloat32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+  switch (TruncationModeOf(node->op())) {
+    case TruncationMode::kJavaScript:
+      return VisitRR(this, kArchTruncateDoubleToI, node);
+    case TruncationMode::kRoundToZero:
+      return VisitRR(this, kPPC_DoubleToInt32, node);
+  }
+  UNREACHABLE();
+}
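+// Note: kJavaScript truncation implements ECMAScript ToInt32 (modulo
+// 2^32) semantics via the generic kArchTruncateDoubleToI, whereas
+// kRoundToZero can use the native round-toward-zero conversion
+// directly.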
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kPPC_Int64ToInt32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+  VisitRR(this, kPPC_Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+  VisitRR(this, kPPC_Int64ToDouble, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+  VisitRR(this, kPPC_Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+  VisitRR(this, kPPC_Uint64ToDouble, node);
+}
+#endif
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+  VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+  VisitRR(this, kPPC_BitcastDoubleToInt64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+  VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+  VisitRR(this, kPPC_BitcastInt64ToDouble, node);
+}
+#endif
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+  VisitRRR(this, kPPC_AddDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  // TODO(mbrandy): detect multiply-add
+  VisitRRR(this, kPPC_AddDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+  PPCOperandGenerator g(this);
+  Float32BinopMatcher m(node);
+  if (m.left().IsMinusZero()) {
+    Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
+  VisitRRR(this, kPPC_SubDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  // TODO(mbrandy): detect multiply-subtract
+  PPCOperandGenerator g(this);
+  Float64BinopMatcher m(node);
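+  // -0.0 - x strength-reduces to a negation, and -0.0 - floor(-x) further
+  // reduces to ceil(x).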
+  if (m.left().IsMinusZero()) {
+    if (m.right().IsFloat64RoundDown() &&
+        CanCover(m.node(), m.right().node())) {
+      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+          CanCover(m.right().node(), m.right().InputAt(0))) {
+        Float64BinopMatcher mright0(m.right().InputAt(0));
+        if (mright0.left().IsMinusZero()) {
+          // -floor(-x) = ceil(x)
+          Emit(kPPC_CeilDouble, g.DefineAsRegister(node),
+               g.UseRegister(mright0.right().node()));
+          return;
+        }
+      }
+    }
+    Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
+  VisitRRR(this, kPPC_SubDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+  VisitRRR(this, kPPC_MulDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  // TODO(mbrandy): detect negate
+  VisitRRR(this, kPPC_MulDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+  VisitRRR(this, kPPC_DivDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kPPC_DivDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  PPCOperandGenerator g(this);
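+  // PPC has no instruction for floating-point modulus; emit a call to the C
+  // runtime with the operands in the fixed double registers d1 and d2.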
+  Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
+       g.UseFixed(node->InputAt(0), d1),
+       g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
+
+
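+// Float min/max are not advertised in SupportedMachineOperatorFlags() below,
+// so these operations are never selected on PPC.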
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  VisitRR(this, kPPC_AbsDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  VisitRR(this, kPPC_AbsDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+  VisitRR(this, kPPC_SqrtDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  VisitRR(this, kPPC_SqrtDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+  VisitRR(this, kPPC_FloorDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+  VisitRR(this, kPPC_FloorDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+  VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+  VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+  VisitRR(this, kPPC_TruncateDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kPPC_TruncateDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  VisitRR(this, kPPC_RoundDouble, node);
+}
+
+
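+// Round-ties-even is likewise absent from SupportedMachineOperatorFlags().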
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+  UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+  UNREACHABLE();
+}
+
+
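+// For <Operation>WithOverflow nodes, projection 1 is the overflow bit. When
+// it is used, attach an overflow flags continuation to the operation;
+// otherwise emit the operation with an empty continuation.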
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
+                                         kInt16Imm, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
+                                &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+                                         kInt16Imm_Negate, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+                                kInt16Imm_Negate, &cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+                                         &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
+                                         &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
+
+
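+// Whether the condition calls for PPC's unsigned (logical) compare
+// (cmpl/cmpli) rather than the signed compare (cmp/cmpi).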
+static bool CompareLogical(FlagsContinuation* cont) {
+  switch (cont->condition()) {
+    case kUnsignedLessThan:
+    case kUnsignedGreaterThanOrEqual:
+    case kUnsignedLessThanOrEqual:
+    case kUnsignedGreaterThan:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont) {
+  PPCOperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
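+  // Branch continuations consume the condition directly; set continuations
+  // materialize it as a boolean in the result register.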
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative, ImmediateMode immediate_mode) {
+  PPCOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, immediate_mode)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, immediate_mode)) {
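+    // The immediate ends up as the second operand, so the condition must be
+    // commuted unless the operation itself is commutative.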
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
+}
+#endif
+
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  PPCOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  PPCOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, InstructionCode opcode,
+                          FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
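+    // Fold 'value' into this comparison only while the selector can cover it
+    // (i.e. 'user' is its only use).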
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_PPC64
+      case IrOpcode::kWord64Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+#endif
+      case IrOpcode::kFloat32Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of
+          // the actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+          Node* const node = value->InputAt(0);
+          Node* const result = NodeProperties::FindProjection(node, 0);
+          if (result == nullptr || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(selector, node,
+                                                     kPPC_SubWithOverflow32,
+                                                     kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_PPC64
+              case IrOpcode::kInt64AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+                                                     kInt16Imm, cont);
+              case IrOpcode::kInt64SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Sub,
+                                                     kInt16Imm_Negate, cont);
+#endif
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Sub:
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kWord32And:
+        // TODO(mbrandy): opportunity for rlwinm?
+        return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
+                                kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_PPC64
+      case IrOpcode::kInt64Sub:
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kWord64And:
+        // TODO(mbrandy): opportunity for rldic?
+        return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
+                                kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+      default:
+        break;
+    }
+    break;
+  }
+
+  // The branch could not be combined with a compare; emit a compare against 0.
+  PPCOperandGenerator g(selector);
+  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+               cont);
+}
+
+
+void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
+  VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
+  VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
+}
+#endif
+
+}  // namespace
+
+
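+// A branch tests its condition input against zero; VisitWordCompareZero
+// attempts to fold a preceding comparison or overflow check into the branch.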
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+  PPCOperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+  // Emit either ArchTableSwitch or ArchLookupSwitch.
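+  // A table switch costs roughly (4 + value_range) instructions of space and
+  // 3 of time; a lookup switch costs (3 + 2 * case_count) and case_count.
+  // Prefer the table form when space + 3 * time is no worse.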
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kPPC_Sub, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
+  }
+
+  // Generate a sequence of conditional jumps.
+  return EmitLookupSwitch(sw, value_operand);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+    Node* node) {
+  PPCOperandGenerator g(this);
+
+  // Prepare for C function call.
+  if (descriptor->IsCFunctionCall()) {
+    Emit(kArchPrepareCallCFunction |
+             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+         0, nullptr, 0, nullptr);
+
+    // Poke any stack arguments.
+    int slot = kStackFrameExtraParamSlot;
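+    // The stack space was reserved by kArchPrepareCallCFunction above; store
+    // each argument into its outgoing parameter slot.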
+    for (PushParameter input : (*arguments)) {
+      Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+           g.TempImmediate(slot));
+      ++slot;
+    }
+  } else {
+    // Push any stack arguments.
+    int num_slots = static_cast<int>(descriptor->StackParameterCount());
+    int slot = 0;
+    for (PushParameter input : (*arguments)) {
+      if (slot == 0) {
+        DCHECK(input.node());
+        Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
+             g.TempImmediate(num_slots));
+      } else {
+        // Skip any alignment holes in pushed nodes.
+        if (input.node()) {
+          Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+               g.TempImmediate(slot));
+        }
+      }
+      ++slot;
+    }
+  }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+  PPCOperandGenerator g(this);
+  Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+  PPCOperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
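+  // A Float64InsertHighWord32 feeding the low-word insert supplies the other
+  // half; combine the pair into a single kPPC_DoubleConstruct.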
+  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+      CanCover(node, left)) {
+    left = left->InputAt(1);
+    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
+         g.UseRegister(right));
+    return;
+  }
+  Emit(kPPC_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
+       g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+  PPCOperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+      CanCover(node, left)) {
+    left = left->InputAt(1);
+    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
+         g.UseRegister(left));
+    return;
+  }
+  Emit(kPPC_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
+       g.UseRegister(left), g.UseRegister(right));
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::kFloat32RoundDown |
+         MachineOperatorBuilder::kFloat64RoundDown |
+         MachineOperatorBuilder::kFloat32RoundUp |
+         MachineOperatorBuilder::kFloat64RoundUp |
+         MachineOperatorBuilder::kFloat32RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesAway |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kWord64Popcnt;
+  // We omit kWord32ShiftIsSafe because the s[rl]w instructions use 0x3f as
+  // the shift-amount mask rather than 0x1f.
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8